diff --git a/CHANGELOG.md b/CHANGELOG.md index a78471f..dc23ee1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # @vladmandic/face-api -Version: **1.2.4** +Version: **1.2.5** Description: **FaceAPI: AI-powered Face Detection & Rotation Tracking, Face Description & Recognition, Age & Gender & Emotion Prediction for Browser and NodeJS using TensorFlow/JS** Author: **Vladimir Mandic ** @@ -9,12 +9,15 @@ Repository: **** ## Changelog +### **1.2.5** 2021/05/27 mandic00@live.com + + +### **origin/master** 2021/05/27 admin@bettysteger.com + + ### **1.2.4** 2021/05/18 mandic00@live.com -### **origin/master** 2021/05/04 mandic00@live.com - - ### **1.2.3** 2021/05/04 mandic00@live.com diff --git a/dist/face-api.esm-nobundle.js b/dist/face-api.esm-nobundle.js index e4c7dd0..9953019 100644 --- a/dist/face-api.esm-nobundle.js +++ b/dist/face-api.esm-nobundle.js @@ -16,13 +16,13 @@ var __require = (x) => { }; var __export = (target, all) => { for (var name in all) - __defProp(target, name, {get: all[name], enumerable: true}); + __defProp(target, name, { get: all[name], enumerable: true }); }; var __reExport = (target, module2, desc) => { if (module2 && typeof module2 === "object" || typeof module2 === "function") { for (let key of __getOwnPropNames(module2)) if (!__hasOwnProp.call(target, key) && key !== "default") - __defProp(target, key, {get: () => module2[key], enumerable: !(desc = __getOwnPropDesc(module2, key)) || desc.enumerable}); + __defProp(target, key, { get: () => module2[key], enumerable: !(desc = __getOwnPropDesc(module2, key)) || desc.enumerable }); } return target; }; @@ -53,7 +53,7 @@ __export(draw_exports, { // src/draw/drawContour.ts function drawContour(ctx, points, isClosed = false) { ctx.beginPath(); - points.slice(1).forEach(({x, y}, prevIdx) => { + points.slice(1).forEach(({ x, y }, prevIdx) => { const from = points[prevIdx]; ctx.moveTo(from.x, from.y); ctx.lineTo(x, y); @@ -93,7 +93,7 @@ __export(utils_exports, { var Dimensions = class { constructor(width, height) { if (!isValidNumber(width) || !isValidNumber(height)) { - throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width, height})}`); + throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({ width, height })}`); } this._width = width; this._height = height; @@ -138,7 +138,7 @@ function round(num, prec = 2) { function isDimensions(obj) { return obj && obj.width && obj.height; } -function computeReshapedDimensions({width, height}, inputSize) { +function computeReshapedDimensions({ width, height }, inputSize) { const scale2 = inputSize / Math.max(height, width); return new Dimensions(Math.round(width * scale2), Math.round(height * scale2)); } @@ -295,7 +295,7 @@ var Box = class { y -= diff / 2; height += diff; } - return new Box({x, y, width, height}); + return new Box({ x, y, width, height }); } rescale(s) { const scaleX = isDimensions(s) ? 
s.width : s; @@ -322,7 +322,7 @@ var Box = class { }); } clipAtImageBorders(imgWidth, imgHeight) { - const {x, y, right, bottom} = this; + const { x, y, right, bottom } = this; const clippedX = Math.max(x, 0); const clippedY = Math.max(y, 0); const newWidth = right - clippedX; @@ -337,7 +337,7 @@ var Box = class { }).floor(); } shift(sx, sy) { - const {width, height} = this; + const { width, height } = this; const x = this.x + sx; const y = this.y + sy; return new Box({ @@ -443,7 +443,7 @@ var ObjectDetection = class { return new Box(this._box).rescale(this.imageDims.reverse()); } forSize(width, height) { - return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, {width, height}); + return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, { width, height }); } }; @@ -453,7 +453,7 @@ var FaceDetection = class extends ObjectDetection { super(score, score, "", relativeBox, imageDims); } forSize(width, height) { - const {score, relativeBox, imageDims} = super.forSize(width, height); + const { score, relativeBox, imageDims } = super.forSize(width, height); return new FaceDetection(score, relativeBox, imageDims); } }; @@ -479,7 +479,7 @@ function minBbox(pts) { // src/ops/nonMaxSuppression.ts function nonMaxSuppression(boxes, scores, iouThreshold, isIOU = true) { - let indicesSortedByScore = scores.map((score, boxIndex) => ({score, boxIndex})).sort((c1, c2) => c1.score - c2.score).map((c) => c.boxIndex); + let indicesSortedByScore = scores.map((score, boxIndex) => ({ score, boxIndex })).sort((c1, c2) => c1.score - c2.score).map((c) => c.boxIndex); const pick = []; while (indicesSortedByScore.length > 0) { const curr = indicesSortedByScore.pop(); @@ -574,7 +574,7 @@ var relY = 0.43; var relScale = 0.45; var FaceLandmarks = class { constructor(relativeFaceLandmarkPositions, imgDims, shift = new Point(0, 0)) { - const {width, height} = imgDims; + const { width, height } = imgDims; this._imgDims = new Dimensions(width, height); this._shift = shift; this._positions = relativeFaceLandmarkPositions.map((pt) => pt.mul(new Point(width, height)).add(shift)); @@ -595,7 +595,7 @@ var FaceLandmarks = class { return this._positions.map((pt) => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight))); } forSize(width, height) { - return new this.constructor(this.relativePositions, {width, height}); + return new this.constructor(this.relativePositions, { width, height }); } shiftBy(x, y) { return new this.constructor(this.relativePositions, this._imgDims, new Point(x, y)); @@ -608,7 +608,7 @@ var FaceLandmarks = class { const box = detection instanceof FaceDetection ? 
detection.box.floor() : new Box(detection); return this.shiftBy(box.x, box.y).align(null, options); } - const {useDlibAlignment, minBoxPadding} = {useDlibAlignment: false, minBoxPadding: 0.2, ...options}; + const { useDlibAlignment, minBoxPadding } = { useDlibAlignment: false, minBoxPadding: 0.2, ...options }; if (useDlibAlignment) { return this.alignDlib(); } @@ -768,8 +768,8 @@ function isWithFaceDetection(obj) { return obj.detection instanceof FaceDetection; } function extendWithFaceDetection(sourceObj, detection) { - const extension = {detection}; - return {...sourceObj, ...extension}; + const extension = { detection }; + return { ...sourceObj, ...extension }; } // src/env/createBrowserEnv.ts @@ -788,6 +788,7 @@ function createBrowserEnv() { Video: HTMLVideoElement, createCanvasElement: () => document.createElement("canvas"), createImageElement: () => document.createElement("img"), + createVideoElement: () => document.createElement("video"), fetch, readFile }; @@ -817,6 +818,7 @@ function createFileSystem(fs) { function createNodejsEnv() { const Canvas = global["Canvas"] || global.HTMLCanvasElement; const Image = global.Image || global.HTMLImageElement; + const Video = global["Video"] || global.HTMLVideoElement; const createCanvasElement = () => { if (Canvas) return new Canvas(); @@ -827,6 +829,11 @@ function createNodejsEnv() { return new Image(); throw new Error("createImageElement - missing Image implementation for nodejs environment"); }; + const createVideoElement = () => { + if (Video) + return new Video(); + throw new Error("createVideoElement - missing Video implementation for nodejs environment"); + }; const fetch = global.fetch; const fileSystem = createFileSystem(); return { @@ -842,6 +849,7 @@ function createNodejsEnv() { }, createCanvasElement, createImageElement, + createVideoElement, fetch, ...fileSystem }; @@ -882,7 +890,7 @@ function monkeyPatch(env2) { if (!environment) { throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()"); } - const {Canvas = environment.Canvas, Image = environment.Image} = env2; + const { Canvas = environment.Canvas, Image = environment.Image } = env2; environment.Canvas = Canvas; environment.Image = Image; environment.createCanvasElement = env2.createCanvasElement || (() => new Canvas()); @@ -915,7 +923,7 @@ function resolveInput(arg) { // src/dom/getContext2dOrThrow.ts function getContext2dOrThrow(canvasArg) { - const {Canvas, CanvasRenderingContext2D: CanvasRenderingContext2D2} = env.getEnv(); + const { Canvas, CanvasRenderingContext2D: CanvasRenderingContext2D2 } = env.getEnv(); if (canvasArg instanceof CanvasRenderingContext2D2) { return canvasArg; } @@ -963,15 +971,15 @@ var DrawTextField = class { this.options = new DrawTextFieldOptions(options); } measureWidth(ctx) { - const {padding} = this.options; + const { padding } = this.options; return this.text.map((l) => ctx.measureText(l).width).reduce((w0, w1) => w0 < w1 ? 
w1 : w0, 0) + 2 * padding; } measureHeight() { - const {fontSize, padding} = this.options; + const { fontSize, padding } = this.options; return this.text.length * fontSize + 2 * padding; } getUpperLeft(ctx, canvasDims) { - const {anchorPosition} = this.options; + const { anchorPosition } = this.options; const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT; const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT; const textFieldWidth = this.measureWidth(ctx); @@ -979,12 +987,12 @@ var DrawTextField = class { const x = isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x; const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y; if (canvasDims) { - const {width, height} = canvasDims; + const { width, height } = canvasDims; const newX = Math.max(Math.min(x, width - textFieldWidth), 0); const newY = Math.max(Math.min(y, height - textFieldHeight), 0); - return {x: newX, y: newY}; + return { x: newX, y: newY }; } - return {x, y}; + return { x, y }; } draw(canvasArg) { const canvas = resolveInput(canvasArg); @@ -1027,7 +1035,7 @@ var DrawBoxOptions = class { anchorPosition: AnchorPosition.BOTTOM_LEFT, backgroundColor: this.boxColor }; - this.drawLabelOptions = new DrawTextFieldOptions({...defaultDrawLabelOptions, ...drawLabelOptions}); + this.drawLabelOptions = new DrawTextFieldOptions({ ...defaultDrawLabelOptions, ...drawLabelOptions }); } }; var DrawBox = class { @@ -1037,7 +1045,7 @@ var DrawBox = class { } draw(canvasArg) { const ctx = getContext2dOrThrow(canvasArg); - const {boxColor, lineWidth} = this.options; + const { boxColor, lineWidth } = this.options; const { x, y, @@ -1047,9 +1055,9 @@ var DrawBox = class { ctx.strokeStyle = boxColor; ctx.lineWidth = lineWidth; ctx.strokeRect(x, y, width, height); - const {label} = this.options; + const { label } = this.options; if (label) { - new DrawTextField([label], {x: x - lineWidth / 2, y}, this.options.drawLabelOptions).draw(canvasArg); + new DrawTextField([label], { x: x - lineWidth / 2, y }, this.options.drawLabelOptions).draw(canvasArg); } } }; @@ -1061,13 +1069,13 @@ function drawDetections(canvasArg, detections) { const score = det instanceof FaceDetection ? det.score : isWithFaceDetection(det) ? det.detection.score : void 0; const box = det instanceof FaceDetection ? det.box : isWithFaceDetection(det) ? det.detection.box : new Box(det); const label = score ? 
`${round(score)}` : void 0; - new DrawBox(box, {label}).draw(canvasArg); + new DrawBox(box, { label }).draw(canvasArg); }); } // src/dom/isMediaLoaded.ts function isMediaLoaded(media) { - const {Image, Video} = env.getEnv(); + const { Image, Video } = env.getEnv(); return media instanceof Image && media.complete || media instanceof Video && media.readyState >= 3; } @@ -1116,7 +1124,7 @@ function bufferToImage(buf) { // src/dom/getMediaDimensions.ts function getMediaDimensions(input) { - const {Image, Video} = env.getEnv(); + const { Image, Video } = env.getEnv(); if (input instanceof Image) { return new Dimensions(input.naturalWidth, input.naturalHeight); } @@ -1127,20 +1135,20 @@ function getMediaDimensions(input) { } // src/dom/createCanvas.ts -function createCanvas({width, height}) { - const {createCanvasElement} = env.getEnv(); +function createCanvas({ width, height }) { + const { createCanvasElement } = env.getEnv(); const canvas = createCanvasElement(); canvas.width = width; canvas.height = height; return canvas; } function createCanvasFromMedia(media, dims) { - const {ImageData: ImageData2} = env.getEnv(); + const { ImageData: ImageData2 } = env.getEnv(); if (!(media instanceof ImageData2) && !isMediaLoaded(media)) { throw new Error("createCanvasFromMedia - media has not finished loading yet"); } - const {width, height} = dims || getMediaDimensions(media); - const canvas = createCanvas({width, height}); + const { width, height } = dims || getMediaDimensions(media); + const canvas = createCanvas({ width, height }); if (media instanceof ImageData2) { getContext2dOrThrow(canvas).putImageData(media, 0, 0); } else { @@ -1161,23 +1169,23 @@ async function imageTensorToCanvas(imgTensor, canvas) { // src/dom/isMediaElement.ts function isMediaElement(input) { - const {Image, Canvas, Video} = env.getEnv(); + const { Image, Canvas, Video } = env.getEnv(); return input instanceof Image || input instanceof Canvas || input instanceof Video; } // src/dom/imageToSquare.ts function imageToSquare(input, inputSize, centerImage = false) { - const {Image, Canvas} = env.getEnv(); + const { Image, Canvas } = env.getEnv(); if (!(input instanceof Image || input instanceof Canvas)) { throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement"); } if (inputSize <= 0) - return createCanvas({width: 1, height: 1}); + return createCanvas({ width: 1, height: 1 }); const dims = getMediaDimensions(input); const scale2 = inputSize / Math.max(dims.height, dims.width); const width = scale2 * dims.width; const height = scale2 * dims.height; - const targetCanvas = createCanvas({width: inputSize, height: inputSize}); + const targetCanvas = createCanvas({ width: inputSize, height: inputSize }); const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input); const offset = Math.abs(width - height) / 2; const dx = centerImage && width < height ? 
offset : 0; @@ -1258,7 +1266,7 @@ var NetInput = class { } const width = this.getInputWidth(batchIdx); const height = this.getInputHeight(batchIdx); - return computeReshapedDimensions({width, height}, this.inputSize); + return computeReshapedDimensions({ width, height }, this.inputSize); } toBatchTensor(inputSize, isCenterInputs = true) { this._inputSize = inputSize; @@ -1311,7 +1319,7 @@ async function toNetInput(inputs) { // src/dom/extractFaces.ts async function extractFaces(input, detections) { - const {Canvas} = env.getEnv(); + const { Canvas } = env.getEnv(); let canvas = input; if (!(input instanceof Canvas)) { const netInput = await toNetInput(input); @@ -1322,8 +1330,8 @@ async function extractFaces(input, detections) { } const ctx = getContext2dOrThrow(canvas); const boxes = detections.map((det) => det instanceof FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det).map((box) => box.clipAtImageBorders(canvas.width, canvas.height)); - return boxes.map(({x, y, width, height}) => { - const faceImg = createCanvas({width, height}); + return boxes.map(({ x, y, width, height }) => { + const faceImg = createCanvas({ width, height }); if (width > 0 && height > 0) getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0); return faceImg; @@ -1353,7 +1361,7 @@ async function extractFaceTensors(imageTensor, detections) { // src/dom/fetchOrThrow.ts async function fetchOrThrow(url, init) { - const {fetch} = env.getEnv(); + const { fetch } = env.getEnv(); const res = await fetch(url, init); if (!(res.status < 400)) { throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`); @@ -1381,6 +1389,31 @@ async function fetchNetWeights(uri) { return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer()); } +// src/dom/bufferToVideo.ts +function bufferToVideo(buf) { + return new Promise((resolve, reject) => { + if (!(buf instanceof Blob)) + reject(new Error("bufferToVideo - expected buf to be of type: Blob")); + const video = env.getEnv().createVideoElement(); + video.oncanplay = () => resolve(video); + video.onerror = reject; + video.playsInline = true; + video.autoplay = true; + video.muted = true; + video.src = URL.createObjectURL(buf); + }); +} + +// src/dom/fetchVideo.ts +async function fetchVideo(uri) { + const res = await fetchOrThrow(uri); + const blob = await res.blob(); + if (!blob.type.startsWith("video/")) { + throw new Error(`fetchVideo - expected blob type to be of type video/*, instead have: ${blob.type}, for url: ${res.url}`); + } + return bufferToVideo(blob); +} + // src/common/getModelUris.ts function getModelUris(uri, defaultModelName) { const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`; @@ -1410,17 +1443,17 @@ function getModelUris(uri, defaultModelName) { // src/dom/loadWeightMap.ts async function loadWeightMap(uri, defaultModelName) { - const {manifestUri, modelBaseUri} = getModelUris(uri, defaultModelName); + const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName); const manifest = await fetchJson(manifestUri); return tfjs_esm_exports.io.loadWeights(manifest, modelBaseUri); } // src/dom/matchDimensions.ts function matchDimensions(input, reference, useMediaDimensions = false) { - const {width, height} = useMediaDimensions ? getMediaDimensions(reference) : reference; + const { width, height } = useMediaDimensions ? 
getMediaDimensions(reference) : reference; input.width = width; input.height = height; - return {width, height}; + return { width, height }; } // src/NeuralNetwork.ts @@ -1440,16 +1473,16 @@ var NeuralNetwork = class { return !!this.params; } getParamFromPath(paramPath) { - const {obj, objProp} = this.traversePropertyPath(paramPath); + const { obj, objProp } = this.traversePropertyPath(paramPath); return obj[objProp]; } reassignParamFromPath(paramPath, tensor2) { - const {obj, objProp} = this.traversePropertyPath(paramPath); + const { obj, objProp } = this.traversePropertyPath(paramPath); obj[objProp].dispose(); obj[objProp] = tensor2; } getParamList() { - return this._paramMappings.map(({paramPath}) => ({ + return this._paramMappings.map(({ paramPath }) => ({ path: paramPath, tensor: this.getParamFromPath(paramPath) })); @@ -1461,12 +1494,12 @@ var NeuralNetwork = class { return this.getParamList().filter((param) => !(param.tensor instanceof tfjs_esm_exports.Variable)); } variable() { - this.getFrozenParams().forEach(({path, tensor: tensor2}) => { + this.getFrozenParams().forEach(({ path, tensor: tensor2 }) => { this.reassignParamFromPath(path, tensor2.variable()); }); } freeze() { - this.getTrainableParams().forEach(({path, tensor: variable}) => { + this.getTrainableParams().forEach(({ path, tensor: variable }) => { const tensor2 = tfjs_esm_exports.tensor(variable.dataSync()); variable.dispose(); this.reassignParamFromPath(path, tensor2); @@ -1482,7 +1515,7 @@ var NeuralNetwork = class { this._params = void 0; } serializeParams() { - return new Float32Array(this.getParamList().map(({tensor: tensor2}) => Array.from(tensor2.dataSync())).reduce((flat, arr) => flat.concat(arr))); + return new Float32Array(this.getParamList().map(({ tensor: tensor2 }) => Array.from(tensor2.dataSync())).reduce((flat, arr) => flat.concat(arr))); } async load(weightsOrUrl) { if (weightsOrUrl instanceof Float32Array) { @@ -1502,8 +1535,8 @@ var NeuralNetwork = class { if (filePath && typeof filePath !== "string") { throw new Error(`${this._name}.loadFromDisk - expected model file path`); } - const {readFile} = env.getEnv(); - const {manifestUri, modelBaseUri} = getModelUris(filePath, this.getDefaultModelName()); + const { readFile } = env.getEnv(); + const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName()); const fetchWeightsFromDisk = (filePaths) => Promise.all(filePaths.map((fp) => readFile(fp).then((buf) => buf.buffer))); const loadWeights = tfjs_esm_exports.io.weightsLoaderFactory(fetchWeightsFromDisk); const manifest = JSON.parse((await readFile(manifestUri)).toString()); @@ -1511,12 +1544,12 @@ var NeuralNetwork = class { this.loadFromWeightMap(weightMap); } loadFromWeightMap(weightMap) { - const {paramMappings, params} = this.extractParamsFromWeightMap(weightMap); + const { paramMappings, params } = this.extractParamsFromWeightMap(weightMap); this._paramMappings = paramMappings; this._params = params; } extractWeights(weights) { - const {paramMappings, params} = this.extractParams(weights); + const { paramMappings, params } = this.extractParams(weights); this._paramMappings = paramMappings; this._params = params; } @@ -1528,13 +1561,13 @@ var NeuralNetwork = class { if (!res.nextObj.hasOwnProperty(objProp2)) { throw new Error(`traversePropertyPath - object does not have property ${objProp2}, for path ${paramPath}`); } - return {obj: res.nextObj, objProp: objProp2, nextObj: res.nextObj[objProp2]}; - }, {nextObj: this.params}); - const {obj, objProp} = result; + return { obj: 
res.nextObj, objProp: objProp2, nextObj: res.nextObj[objProp2] }; + }, { nextObj: this.params }); + const { obj, objProp } = result; if (!obj || !objProp || !(obj[objProp] instanceof tfjs_esm_exports.Tensor)) { throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`); } - return {obj, objProp}; + return { obj, objProp }; } }; @@ -1591,8 +1624,8 @@ function extractConvParamsFactory(extractWeights, paramMappings) { return (channelsIn, channelsOut, filterSize, mappedPrefix) => { const filters = tfjs_esm_exports.tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]); const bias = tfjs_esm_exports.tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/bias` }); + return { filters, bias }; }; } @@ -1601,7 +1634,7 @@ function extractFCParamsFactory(extractWeights, paramMappings) { return (channelsIn, channelsOut, mappedPrefix) => { const fc_weights = tfjs_esm_exports.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]); const fc_bias = tfjs_esm_exports.tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/bias`}); + paramMappings.push({ paramPath: `${mappedPrefix}/weights` }, { paramPath: `${mappedPrefix}/bias` }); return { weights: fc_weights, bias: fc_bias @@ -1624,7 +1657,7 @@ function extractSeparableConvParamsFactory(extractWeights, paramMappings) { const depthwise_filter = tfjs_esm_exports.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]); const pointwise_filter = tfjs_esm_exports.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]); const bias = tfjs_esm_exports.tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/depthwise_filter`}, {paramPath: `${mappedPrefix}/pointwise_filter`}, {paramPath: `${mappedPrefix}/bias`}); + paramMappings.push({ paramPath: `${mappedPrefix}/depthwise_filter` }, { paramPath: `${mappedPrefix}/pointwise_filter` }, { paramPath: `${mappedPrefix}/bias` }); return new SeparableConvParams(depthwise_filter, pointwise_filter, bias); }; } @@ -1644,7 +1677,7 @@ function extractWeightEntryFactory(weightMap, paramMappings) { if (!isTensor(tensor2, paramRank)) { throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor2}`); } - paramMappings.push({originalPath, paramPath: mappedPath || originalPath}); + paramMappings.push({ originalPath, paramPath: mappedPath || originalPath }); return tensor2; }; } @@ -1674,10 +1707,10 @@ function extractorsFactory(extractWeights, paramMappings) { const conv0 = isFirstLayer ? 
extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`) : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`); const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`); const conv22 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`); - return {conv0, conv1, conv2: conv22}; + return { conv0, conv1, conv2: conv22 }; } function extractDenseBlock4Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer = false) { - const {conv0, conv1, conv2: conv22} = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer); + const { conv0, conv1, conv2: conv22 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer); const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`); return { conv0, @@ -1725,7 +1758,7 @@ function loadConvParamsFactory(extractWeightEntry) { return (prefix) => { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {filters, bias}; + return { filters, bias }; }; } @@ -1738,7 +1771,7 @@ function loadParamsFactory(weightMap, paramMappings) { const conv0 = isFirstLayer ? extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); const conv1 = extractSeparableConvParams(`${prefix}/conv1`); const conv22 = extractSeparableConvParams(`${prefix}/conv2`); - return {conv0, conv1, conv2: conv22}; + return { conv0, conv1, conv2: conv22 }; } function extractDenseBlock4Params(prefix, isFirstLayer = false) { const conv0 = isFirstLayer ? extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); @@ -1771,7 +1804,7 @@ function extractParamsFromWeightMap(weightMap) { dense3: extractDenseBlock4Params("dense3") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceFeatureExtractor/FaceFeatureExtractor.ts @@ -1780,7 +1813,7 @@ var FaceFeatureExtractor = class extends NeuralNetwork { super("FaceFeatureExtractor"); } forwardInput(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("FaceFeatureExtractor - load model before inference"); } @@ -1829,7 +1862,7 @@ function extractParams2(weights, channelsIn, channelsOut) { } return { paramMappings, - params: {fc} + params: { fc } }; } @@ -1840,13 +1873,13 @@ function extractParamsFromWeightMap2(weightMap) { function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {weights, bias}; + return { weights, bias }; } const params = { fc: extractFcParams("fc") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceProcessor/util.ts @@ -1857,7 +1890,7 @@ function seperateWeightMaps(weightMap) { const map = key.startsWith("fc") ? 
classifierMap : featureExtractorMap; map[key] = weightMap[key]; }); - return {featureExtractorMap, classifierMap}; + return { featureExtractorMap, classifierMap }; } // src/faceProcessor/FaceProcessor.ts @@ -1870,7 +1903,7 @@ var FaceProcessor = class extends NeuralNetwork { return this._faceFeatureExtractor; } runNet(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } @@ -1884,7 +1917,7 @@ var FaceProcessor = class extends NeuralNetwork { super.dispose(throwOnRedispose); } loadClassifierParams(weights) { - const {params, paramMappings} = this.extractClassifierParams(weights); + const { params, paramMappings } = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } @@ -1892,7 +1925,7 @@ var FaceProcessor = class extends NeuralNetwork { return extractParams2(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut()); } extractParamsFromWeightMap(weightMap) { - const {featureExtractorMap, classifierMap} = seperateWeightMaps(weightMap); + const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeightMap2(classifierMap); } @@ -1919,7 +1952,7 @@ var FaceExpressions = class { }); } asSortedArray() { - return FACE_EXPRESSION_LABELS.map((expression) => ({expression, probability: this[expression]})).sort((e0, e1) => e1.probability - e0.probability); + return FACE_EXPRESSION_LABELS.map((expression) => ({ expression, probability: this[expression] })).sort((e0, e1) => e1.probability - e0.probability); } }; @@ -1962,8 +1995,8 @@ function isWithFaceExpressions(obj) { return obj.expressions instanceof FaceExpressions; } function extendWithFaceExpressions(sourceObj, expressions) { - const extension = {expressions}; - return {...sourceObj, ...extension}; + const extension = { expressions }; + return { ...sourceObj, ...extension }; } // src/draw/drawFaceExpressions.ts @@ -1989,7 +2022,7 @@ function isWithFaceLandmarks(obj) { function calculateFaceAngle(mesh) { const radians = (a1, a2, b1, b2) => Math.atan2(b2 - a2, b1 - a1) % Math.PI; const degrees = (theta) => theta * 180 / Math.PI; - const angle = {roll: void 0, pitch: void 0, yaw: void 0}; + const angle = { roll: void 0, pitch: void 0, yaw: void 0 }; if (!mesh || !mesh._positions || mesh._positions.length !== 68) return angle; const pt = mesh._positions; @@ -2001,10 +2034,10 @@ function calculateFaceAngle(mesh) { return angle; } function extendWithFaceLandmarks(sourceObj, unshiftedLandmarks) { - const {box: shift} = sourceObj.detection; + const { box: shift } = sourceObj.detection; const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y); const rect = landmarks.align(); - const {imageDims} = sourceObj.detection; + const { imageDims } = sourceObj.detection; const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims); const angle = calculateFaceAngle(unshiftedLandmarks); const extension = { @@ -2013,7 +2046,7 @@ function extendWithFaceLandmarks(sourceObj, unshiftedLandmarks) { alignedRect, angle }; - return {...sourceObj, ...extension}; + return { ...sourceObj, ...extension }; } // src/draw/DrawFaceLandmarks.ts @@ -2085,7 +2118,7 @@ function drawFaceLandmarks(canvasArg, faceLandmarks) { } // package.json -var version = "1.2.4"; +var version = "1.2.5"; // src/xception/extractParams.ts function extractorsFactory2(extractWeights, paramMappings) 
{ @@ -2095,13 +2128,13 @@ function extractorsFactory2(extractWeights, paramMappings) { const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`); - return {separable_conv0, separable_conv1, expansion_conv}; + return { separable_conv0, separable_conv1, expansion_conv }; } function extractMainBlockParams(channels, mappedPrefix) { const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`); - return {separable_conv0, separable_conv1, separable_conv2}; + return { separable_conv0, separable_conv1, separable_conv2 }; } return { extractConvParams, @@ -2145,7 +2178,7 @@ function extractParams3(weights, numMainBlocks) { } return { paramMappings, - params: {entry_flow, middle_flow, exit_flow} + params: { entry_flow, middle_flow, exit_flow } }; } @@ -2158,13 +2191,13 @@ function loadParamsFactory2(weightMap, paramMappings) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`); - return {separable_conv0, separable_conv1, expansion_conv}; + return { separable_conv0, separable_conv1, expansion_conv }; } function extractMainBlockParams(mappedPrefix) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`); - return {separable_conv0, separable_conv1, separable_conv2}; + return { separable_conv0, separable_conv1, separable_conv2 }; } return { extractConvParams, @@ -2200,7 +2233,7 @@ function extractParamsFromWeightMap3(weightMap, numMainBlocks) { separable_conv: exit_flow_separable_conv }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params: {entry_flow, middle_flow, exit_flow}, paramMappings}; + return { params: { entry_flow, middle_flow, exit_flow }, paramMappings }; } // src/xception/TinyXception.ts @@ -2228,7 +2261,7 @@ var TinyXception = class extends NeuralNetwork { this._numMainBlocks = numMainBlocks; } forwardInput(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("TinyXception - load model before inference"); } @@ -2276,7 +2309,7 @@ function extractParams4(weights) { } return { paramMappings, - params: {fc: {age, gender}} + params: { fc: { age, gender } } }; } @@ -2287,7 +2320,7 @@ function extractParamsFromWeightMap4(weightMap) { function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {weights, bias}; + return { weights, bias }; } const params = { fc: { @@ -2296,7 +2329,7 @@ function extractParamsFromWeightMap4(weightMap) { } }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/ageGenderNet/types.ts @@ -2316,7 +2349,7 @@ var AgeGenderNet = 
class extends NeuralNetwork { return this._faceFeatureExtractor; } runNet(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } @@ -2325,13 +2358,13 @@ var AgeGenderNet = class extends NeuralNetwork { const pooled = tfjs_esm_exports.avgPool(bottleneckFeatures, [7, 7], [2, 2], "valid").as2D(bottleneckFeatures.shape[0], -1); const age = fullyConnectedLayer(pooled, params.fc.age).as1D(); const gender = fullyConnectedLayer(pooled, params.fc.gender); - return {age, gender}; + return { age, gender }; }); } forwardInput(input) { return tfjs_esm_exports.tidy(() => { - const {age, gender} = this.runNet(input); - return {age, gender: tfjs_esm_exports.softmax(gender)}; + const { age, gender } = this.runNet(input); + return { age, gender: tfjs_esm_exports.softmax(gender) }; }); } async forward(input) { @@ -2346,7 +2379,7 @@ var AgeGenderNet = class extends NeuralNetwork { ageTensor, genderTensor: genders[i] })); - const predictionsByBatch = await Promise.all(ageAndGenderTensors.map(async ({ageTensor, genderTensor}) => { + const predictionsByBatch = await Promise.all(ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => { const age = ageTensor.dataSync()[0]; const probMale = genderTensor.dataSync()[0]; const isMale = probMale > 0.5; @@ -2354,7 +2387,7 @@ var AgeGenderNet = class extends NeuralNetwork { const genderProbability = isMale ? probMale : 1 - probMale; ageTensor.dispose(); genderTensor.dispose(); - return {age, gender, genderProbability}; + return { age, gender, genderProbability }; })); out.age.dispose(); out.gender.dispose(); @@ -2368,7 +2401,7 @@ var AgeGenderNet = class extends NeuralNetwork { super.dispose(throwOnRedispose); } loadClassifierParams(weights) { - const {params, paramMappings} = this.extractClassifierParams(weights); + const { params, paramMappings } = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } @@ -2376,7 +2409,7 @@ var AgeGenderNet = class extends NeuralNetwork { return extractParams4(weights); } extractParamsFromWeightMap(weightMap) { - const {featureExtractorMap, classifierMap} = seperateWeightMaps(weightMap); + const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeightMap4(classifierMap); } @@ -2392,7 +2425,7 @@ var AgeGenderNet = class extends NeuralNetwork { // src/faceLandmarkNet/FaceLandmark68NetBase.ts var FaceLandmark68NetBase = class extends FaceProcessor { postProcess(output, inputSize, originalDimensions) { - const inputDimensions = originalDimensions.map(({width, height}) => { + const inputDimensions = originalDimensions.map(({ width, height }) => { const scale2 = inputSize / Math.max(height, width); return { width: width * scale2, @@ -2403,7 +2436,7 @@ var FaceLandmark68NetBase = class extends FaceProcessor { return tfjs_esm_exports.tidy(() => { const createInterleavedTensor = (fillX, fillY) => tfjs_esm_exports.stack([tfjs_esm_exports.fill([68], fillX, "float32"), tfjs_esm_exports.fill([68], fillY, "float32")], 1).as2D(1, 136).as1D(); const getPadding = (batchIdx, cond) => { - const {width, height} = inputDimensions[batchIdx]; + const { width, height } = inputDimensions[batchIdx]; return cond(width, height) ? 
Math.abs(width - height) / 2 : 0; }; const getPaddingX = (batchIdx) => getPadding(batchIdx, (w, h) => w < h); @@ -2415,7 +2448,7 @@ var FaceLandmark68NetBase = class extends FaceProcessor { forwardInput(input) { return tfjs_esm_exports.tidy(() => { const out = this.runNet(input); - return this.postProcess(out, input.inputSize, input.inputDimensions.map(([height, width]) => ({height, width}))); + return this.postProcess(out, input.inputSize, input.inputDimensions.map(([height, width]) => ({ height, width }))); }); } async forward(input) { @@ -2466,7 +2499,7 @@ function extractParamsFromWeightMapTiny(weightMap) { dense2: extractDenseBlock3Params("dense2") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceFeatureExtractor/extractParamsTiny.ts @@ -2487,7 +2520,7 @@ function extractParamsTiny(weights) { } return { paramMappings, - params: {dense0, dense1, dense2} + params: { dense0, dense1, dense2 } }; } @@ -2497,7 +2530,7 @@ var TinyFaceFeatureExtractor = class extends NeuralNetwork { super("TinyFaceFeatureExtractor"); } forwardInput(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("TinyFaceFeatureExtractor - load model before inference"); } @@ -2550,7 +2583,7 @@ function scale(x, params) { // src/faceRecognitionNet/convLayer.ts function convLayer2(x, params, strides, withRelu, padding = "same") { - const {filters, bias} = params.conv; + const { filters, bias } = params.conv; let out = tfjs_esm_exports.conv2d(x, filters, strides, padding); out = tfjs_esm_exports.add(out, bias); out = scale(out, params.scale); @@ -2579,13 +2612,13 @@ function extractorsFactory3(extractWeights, paramMappings) { function extractConvParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const filters = extractFilterValues(numFilterValues, numFilters, filterSize); const bias = tfjs_esm_exports.tensor1d(extractWeights(numFilters)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/bias` }); + return { filters, bias }; } function extractScaleLayerParams(numWeights, mappedPrefix) { const weights = tfjs_esm_exports.tensor1d(extractWeights(numWeights)); const biases = tfjs_esm_exports.tensor1d(extractWeights(numWeights)); - paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/biases`}); + paramMappings.push({ paramPath: `${mappedPrefix}/weights` }, { paramPath: `${mappedPrefix}/biases` }); return { weights, biases @@ -2594,12 +2627,12 @@ function extractorsFactory3(extractWeights, paramMappings) { function extractConvLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const conv3 = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`); const scale2 = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`); - return {conv: conv3, scale: scale2}; + return { conv: conv3, scale: scale2 }; } function extractResidualLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix, isDown = false) { const conv1 = extractConvLayerParams((isDown ? 
0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`); const conv22 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`); - return {conv1, conv2: conv22}; + return { conv1, conv2: conv22 }; } return { extractConvLayerParams, @@ -2632,7 +2665,7 @@ function extractParams5(weights) { const conv256_2 = extractResidualLayerParams(589824, 256, 3, "conv256_2"); const conv256_down_out = extractResidualLayerParams(589824, 256, 3, "conv256_down_out"); const fc = tfjs_esm_exports.tidy(() => tfjs_esm_exports.transpose(tfjs_esm_exports.tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0])); - paramMappings.push({paramPath: "fc"}); + paramMappings.push({ paramPath: "fc" }); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } @@ -2654,7 +2687,7 @@ function extractParams5(weights) { conv256_down_out, fc }; - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceRecognitionNet/extractParamsFromWeightMap.ts @@ -2663,13 +2696,13 @@ function extractorsFactory4(weightMap, paramMappings) { function extractScaleLayerParams(prefix) { const weights = extractWeightEntry(`${prefix}/scale/weights`, 1); const biases = extractWeightEntry(`${prefix}/scale/biases`, 1); - return {weights, biases}; + return { weights, biases }; } function extractConvLayerParams(prefix) { const filters = extractWeightEntry(`${prefix}/conv/filters`, 4); const bias = extractWeightEntry(`${prefix}/conv/bias`, 1); const scale2 = extractScaleLayerParams(prefix); - return {conv: {filters, bias}, scale: scale2}; + return { conv: { filters, bias }, scale: scale2 }; } function extractResidualLayerParams(prefix) { return { @@ -2703,8 +2736,8 @@ function extractParamsFromWeightMap5(weightMap) { const conv256_1 = extractResidualLayerParams("conv256_1"); const conv256_2 = extractResidualLayerParams("conv256_2"); const conv256_down_out = extractResidualLayerParams("conv256_down_out"); - const {fc} = weightMap; - paramMappings.push({originalPath: "fc", paramPath: "fc"}); + const { fc } = weightMap; + paramMappings.push({ originalPath: "fc", paramPath: "fc" }); if (!isTensor2D(fc)) { throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${fc}`); } @@ -2727,7 +2760,7 @@ function extractParamsFromWeightMap5(weightMap) { fc }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceRecognitionNet/residualLayer.ts @@ -2767,7 +2800,7 @@ var FaceRecognitionNet = class extends NeuralNetwork { super("FaceRecognitionNet"); } forwardInput(input) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("FaceRecognitionNet - load model before inference"); } @@ -2829,8 +2862,8 @@ function createFaceRecognitionNet(weights) { // src/factories/WithFaceDescriptor.ts function extendWithFaceDescriptor(sourceObj, descriptor) { - const extension = {descriptor}; - return {...sourceObj, ...extension}; + const extension = { descriptor }; + return { ...sourceObj, ...extension }; } // src/factories/WithAge.ts @@ -2838,8 +2871,8 @@ function isWithAge(obj) { return typeof obj.age === "number"; } function extendWithAge(sourceObj, age) { - const extension = {age}; - return {...sourceObj, ...extension}; + const extension = { age }; + return { ...sourceObj, ...extension }; } // src/factories/WithGender.ts @@ -2847,8 +2880,8 @@ function isWithGender(obj) { return (obj.gender === Gender.MALE || 
obj.gender === Gender.FEMALE) && isValidProbablitiy(obj.genderProbability); } function extendWithGender(sourceObj, gender, genderProbability) { - const extension = {gender, genderProbability}; - return {...sourceObj, ...extension}; + const extension = { gender, genderProbability }; + return { ...sourceObj, ...extension }; } // src/ssdMobilenetv1/extractParams.ts @@ -2859,7 +2892,7 @@ function extractorsFactory5(extractWeights, paramMappings) { const batch_norm_offset = tfjs_esm_exports.tensor1d(extractWeights(numChannels)); const batch_norm_mean = tfjs_esm_exports.tensor1d(extractWeights(numChannels)); const batch_norm_variance = tfjs_esm_exports.tensor1d(extractWeights(numChannels)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/batch_norm_scale`}, {paramPath: `${mappedPrefix}/batch_norm_offset`}, {paramPath: `${mappedPrefix}/batch_norm_mean`}, {paramPath: `${mappedPrefix}/batch_norm_variance`}); + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/batch_norm_scale` }, { paramPath: `${mappedPrefix}/batch_norm_offset` }, { paramPath: `${mappedPrefix}/batch_norm_mean` }, { paramPath: `${mappedPrefix}/batch_norm_variance` }); return { filters, batch_norm_scale, @@ -2871,8 +2904,8 @@ function extractorsFactory5(extractWeights, paramMappings) { function extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, isPointwiseConv) { const filters = tfjs_esm_exports.tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]); const bias = tfjs_esm_exports.tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/${isPointwiseConv ? "batch_norm_offset" : "bias"}`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/${isPointwiseConv ? 
"batch_norm_offset" : "bias"}` }); + return { filters, bias }; } function extractPointwiseConvParams(channelsIn, channelsOut, filterSize, mappedPrefix) { const { @@ -2887,7 +2920,7 @@ function extractorsFactory5(extractWeights, paramMappings) { function extractConvPairParams(channelsIn, channelsOut, mappedPrefix) { const depthwise_conv = extractDepthwiseConvParams(channelsIn, `${mappedPrefix}/depthwise_conv`); const pointwise_conv = extractPointwiseConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/pointwise_conv`); - return {depthwise_conv, pointwise_conv}; + return { depthwise_conv, pointwise_conv }; } function extractMobilenetV1Params() { const conv_0 = extractPointwiseConvParams(3, 32, 3, "mobilenetv1/conv_0"); @@ -3004,7 +3037,7 @@ function extractParams6(weights) { const output_layer = { extra_dim }; - paramMappings.push({paramPath: "output_layer/extra_dim"}); + paramMappings.push({ paramPath: "output_layer/extra_dim" }); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } @@ -3024,7 +3057,7 @@ function extractorsFactory6(weightMap, paramMappings) { function extractPointwiseConvParams(prefix, idx, mappedPrefix) { const filters = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/weights`, 4, `${mappedPrefix}/filters`); const batch_norm_offset = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/convolution_bn_offset`, 1, `${mappedPrefix}/batch_norm_offset`); - return {filters, batch_norm_offset}; + return { filters, batch_norm_offset }; } function extractConvPairParams(idx) { const mappedPrefix = `mobilenetv1/conv_${idx}`; @@ -3068,12 +3101,12 @@ function extractorsFactory6(weightMap, paramMappings) { function extractConvParams(prefix, mappedPrefix) { const filters = extractWeightEntry(`${prefix}/weights`, 4, `${mappedPrefix}/filters`); const bias = extractWeightEntry(`${prefix}/biases`, 1, `${mappedPrefix}/bias`); - return {filters, bias}; + return { filters, bias }; } function extractBoxPredictorParams(idx) { const box_encoding_predictor = extractConvParams(`Prediction/BoxPredictor_${idx}/BoxEncodingPredictor`, `prediction_layer/box_predictor_${idx}/box_encoding_predictor`); const class_predictor = extractConvParams(`Prediction/BoxPredictor_${idx}/ClassPredictor`, `prediction_layer/box_predictor_${idx}/class_predictor`); - return {box_encoding_predictor, class_predictor}; + return { box_encoding_predictor, class_predictor }; } function extractPredictionLayerParams() { return { @@ -3105,7 +3138,7 @@ function extractParamsFromWeightMap6(weightMap) { extractPredictionLayerParams } = extractorsFactory6(weightMap, paramMappings); const extra_dim = weightMap["Output/extra_dim"]; - paramMappings.push({originalPath: "Output/extra_dim", paramPath: "output_layer/extra_dim"}); + paramMappings.push({ originalPath: "Output/extra_dim", paramPath: "output_layer/extra_dim" }); if (!isTensor3D(extra_dim)) { throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${extra_dim}`); } @@ -3117,7 +3150,7 @@ function extractParamsFromWeightMap6(weightMap) { } }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/ssdMobilenetv1/pointwiseConvLayer.ts @@ -3203,7 +3236,7 @@ function IOU(boxes, i, j) { function nonMaxSuppression2(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) { const numBoxes = boxes.shape[0]; const outputSize = Math.min(maxOutputSize, numBoxes); - const candidates = scores.map((score, 
boxIndex) => ({score, boxIndex})).filter((c) => c.score > scoreThreshold).sort((c1, c2) => c2.score - c1.score); + const candidates = scores.map((score, boxIndex) => ({ score, boxIndex })).filter((c) => c.score > scoreThreshold).sort((c1, c2) => c2.score - c1.score); const suppressFunc = (x) => x <= iouThreshold ? 1 : 0; const selected = []; candidates.forEach((c) => { @@ -3236,10 +3269,10 @@ function getCenterCoordinatesAndSizesLayer(x) { tfjs_esm_exports.add(vec[0], tfjs_esm_exports.div(sizes[0], 2)), tfjs_esm_exports.add(vec[1], tfjs_esm_exports.div(sizes[1], 2)) ]; - return {sizes, centers}; + return { sizes, centers }; } function decodeBoxesLayer(x0, x1) { - const {sizes, centers} = getCenterCoordinatesAndSizesLayer(x0); + const { sizes, centers } = getCenterCoordinatesAndSizesLayer(x0); const vec = tfjs_esm_exports.unstack(tfjs_esm_exports.transpose(x1, [1, 0])); const div0_out = tfjs_esm_exports.div(tfjs_esm_exports.mul(tfjs_esm_exports.exp(tfjs_esm_exports.div(vec[2], 5)), sizes[0]), 2); const add0_out = tfjs_esm_exports.add(tfjs_esm_exports.mul(tfjs_esm_exports.div(vec[0], 10), sizes[0]), centers[0]); @@ -3262,7 +3295,7 @@ function outputLayer(boxPredictions, classPredictions, params) { scores = tfjs_esm_exports.reshape(scores, [batchSize, scores.shape[1]]); const boxesByBatch = tfjs_esm_exports.unstack(boxes); const scoresByBatch = tfjs_esm_exports.unstack(scores); - return {boxes: boxesByBatch, scores: scoresByBatch}; + return { boxes: boxesByBatch, scores: scoresByBatch }; }); } @@ -3272,7 +3305,7 @@ function boxPredictionLayer(x, params) { const batchSize = x.shape[0]; const boxPredictionEncoding = tfjs_esm_exports.reshape(convLayer(x, params.box_encoding_predictor), [batchSize, -1, 1, 4]); const classPrediction = tfjs_esm_exports.reshape(convLayer(x, params.class_predictor), [batchSize, -1, 3]); - return {boxPredictionEncoding, classPrediction}; + return { boxPredictionEncoding, classPrediction }; }); } @@ -3318,7 +3351,7 @@ function predictionLayer(x, conv11, params) { // src/ssdMobilenetv1/SsdMobilenetv1Options.ts var SsdMobilenetv1Options = class { - constructor({minConfidence, maxResults} = {}) { + constructor({ minConfidence, maxResults } = {}) { this._name = "SsdMobilenetv1Options"; this._minConfidence = minConfidence || 0.5; this._maxResults = maxResults || 100; @@ -3343,14 +3376,14 @@ var SsdMobilenetv1 = class extends NeuralNetwork { super("SsdMobilenetv1"); } forwardInput(input) { - const {params} = this; + const { params } = this; if (!params) throw new Error("SsdMobilenetv1 - load model before inference"); return tfjs_esm_exports.tidy(() => { const batchTensor = tfjs_esm_exports.cast(input.toBatchTensor(512, false), "float32"); const x = tfjs_esm_exports.sub(tfjs_esm_exports.div(batchTensor, 127.5), 1); const features = mobileNetV1(x, params.mobilenetv1); - const {boxPredictions, classPredictions} = predictionLayer(features.out, features.conv11, params.prediction_layer); + const { boxPredictions, classPredictions } = predictionLayer(features.out, features.conv11, params.prediction_layer); return outputLayer(boxPredictions, classPredictions, params.output_layer); }); } @@ -3358,9 +3391,9 @@ var SsdMobilenetv1 = class extends NeuralNetwork { return this.forwardInput(await toNetInput(input)); } async locateFaces(input, options = {}) { - const {maxResults, minConfidence} = new SsdMobilenetv1Options(options); + const { maxResults, minConfidence } = new SsdMobilenetv1Options(options); const netInput = await toNetInput(input); - const {boxes: _boxes, scores: _scores} = 
this.forwardInput(netInput); + const { boxes: _boxes, scores: _scores } = this.forwardInput(netInput); const boxes = _boxes[0]; const scores = _scores[0]; for (let i = 1; i < _boxes.length; i++) { @@ -3384,7 +3417,7 @@ var SsdMobilenetv1 = class extends NeuralNetwork { Math.max(0, boxesData[idx][1]), Math.min(1, boxesData[idx][3]) ].map((val) => val * padX); - return new FaceDetection(scoresData[idx], new Rect(left, top, right - left, bottom - top), {height: netInput.getInputHeight(0), width: netInput.getInputWidth(0)}); + return new FaceDetection(scoresData[idx], new Rect(left, top, right - left, bottom - top), { height: netInput.getInputHeight(0), width: netInput.getInputWidth(0) }); }); boxes.dispose(); scores.dispose(); @@ -3492,13 +3525,13 @@ function extractorsFactory7(extractWeights, paramMappings) { function extractBatchNormParams(size, mappedPrefix) { const sub6 = tfjs_esm_exports.tensor1d(extractWeights(size)); const truediv = tfjs_esm_exports.tensor1d(extractWeights(size)); - paramMappings.push({paramPath: `${mappedPrefix}/sub`}, {paramPath: `${mappedPrefix}/truediv`}); - return {sub: sub6, truediv}; + paramMappings.push({ paramPath: `${mappedPrefix}/sub` }, { paramPath: `${mappedPrefix}/truediv` }); + return { sub: sub6, truediv }; } function extractConvWithBatchNormParams(channelsIn, channelsOut, mappedPrefix) { const conv3 = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`); const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`); - return {conv: conv3, bn}; + return { conv: conv3, bn }; } const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings); return { @@ -3567,7 +3600,7 @@ function extractParams7(weights, config, boxEncodingSize, filterSizes) { if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } - return {params, paramMappings}; + return { params, paramMappings }; } // src/tinyYolov2/extractParamsFromWeightMap.ts @@ -3576,17 +3609,17 @@ function extractorsFactory8(weightMap, paramMappings) { function extractBatchNormParams(prefix) { const sub6 = extractWeightEntry(`${prefix}/sub`, 1); const truediv = extractWeightEntry(`${prefix}/truediv`, 1); - return {sub: sub6, truediv}; + return { sub: sub6, truediv }; } function extractConvParams(prefix) { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {filters, bias}; + return { filters, bias }; } function extractConvWithBatchNormParams(prefix) { const conv3 = extractConvParams(`${prefix}/conv`); const bn = extractBatchNormParams(`${prefix}/bn`); - return {conv: conv3, bn}; + return { conv: conv3, bn }; } const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry); return { @@ -3630,12 +3663,12 @@ function extractParamsFromWeightMap7(weightMap, config) { }; } disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/tinyYolov2/TinyYolov2Options.ts var TinyYolov2Options = class { - constructor({inputSize, scoreThreshold} = {}) { + constructor({ inputSize, scoreThreshold } = {}) { this._name = "TinyYolov2Options"; this._inputSize = inputSize || 416; this._scoreThreshold = scoreThreshold || 0.5; @@ -3705,7 +3738,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return convLayer(out, params.conv8, "valid", false); } forwardInput(input, inputSize) { - const {params} = this; + const { params } = this; if 
(!params) { throw new Error("TinyYolov2 - load model before inference"); } @@ -3720,7 +3753,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return this.forwardInput(await toNetInput(input), inputSize); } async detect(input, forwardParams = {}) { - const {inputSize, scoreThreshold} = new TinyYolov2Options(forwardParams); + const { inputSize, scoreThreshold } = new TinyYolov2Options(forwardParams); const netInput = await toNetInput(input); const out = await this.forwardInput(netInput, inputSize); const out0 = tfjs_esm_exports.tidy(() => tfjs_esm_exports.unstack(out)[0].expandDims()); @@ -3754,7 +3787,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return extractParams7(weights, this.config, this.boxEncodingSize, filterSizes); } async extractBoxes(outputTensor, inputBlobDimensions, scoreThreshold) { - const {width, height} = inputBlobDimensions; + const { width, height } = inputBlobDimensions; const inputSize = Math.max(width, height); const correctionFactorX = inputSize / width; const correctionFactorY = inputSize / height; @@ -3781,8 +3814,8 @@ var _TinyYolov2Base = class extends NeuralNetwork { const heightLocal = Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y / numCells * correctionFactorY; const x = ctX - widthLocal / 2; const y = ctY - heightLocal / 2; - const pos = {row, col, anchor}; - const {classScore, label} = this.withClassScores ? await this.extractPredictedClass(classScoresTensor, pos) : {classScore: 1, label: 0}; + const pos = { row, col, anchor }; + const { classScore, label } = this.withClassScores ? await this.extractPredictedClass(classScoresTensor, pos) : { classScore: 1, label: 0 }; results.push({ box: new BoundingBox(x, y, x + widthLocal, y + heightLocal), score, @@ -3800,7 +3833,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return results; } async extractPredictedClass(classesTensor, pos) { - const {row, col, anchor} = pos; + const { row, col, anchor } = pos; const classesData = await classesTensor.array(); return Array(this.config.classes.length).fill(0).map((_, i) => classesData[row][col][anchor][i]).map((classScore, label) => ({ classScore, @@ -3836,7 +3869,7 @@ var TinyYolov2 = class extends TinyYolov2Base { } async locateFaces(input, forwardParams) { const objectDetections = await this.detect(input, forwardParams); - return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); + return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight })); } getDefaultModelName() { return this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME; @@ -3872,7 +3905,7 @@ var ComposableTask = class { }; // src/globalApi/extractFacesAndComputeResults.ts -async function extractAllFacesAndComputeResults(parentResults, input, computeResults, extractedFaces, getRectForAlignment = ({alignedRect}) => alignedRect) { +async function extractAllFacesAndComputeResults(parentResults, input, computeResults, extractedFaces, getRectForAlignment = ({ alignedRect }) => alignedRect) { const faceBoxes = parentResults.map((parentResult) => isWithFaceLandmarks(parentResult) ? getRectForAlignment(parentResult) : parentResult.detection); const faces = extractedFaces || (input instanceof tfjs_esm_exports.Tensor ? 
await extractFaceTensors(input, faceBoxes) : await extractFaces(input, faceBoxes)); const results = await computeResults(faces); @@ -3913,7 +3946,7 @@ var TinyFaceDetector = class extends TinyYolov2Base { } async locateFaces(input, forwardParams) { const objectDetections = await this.detect(input, forwardParams); - return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); + return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight })); } getDefaultModelName() { return "tiny_face_detector_model"; @@ -4017,7 +4050,7 @@ var PredictAllAgeAndGenderTask = class extends PredictAgeAndGenderTaskBase { const parentResults = await this.parentTask; const ageAndGenderByFace = await extractAllFacesAndComputeResults(parentResults, this.input, async (faces) => Promise.all(faces.map((face) => nets.ageGenderNet.predictAgeAndGender(face))), this.extractedFaces); return parentResults.map((parentResult, i) => { - const {age, gender, genderProbability} = ageAndGenderByFace[i]; + const { age, gender, genderProbability } = ageAndGenderByFace[i]; return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age); }); } @@ -4030,7 +4063,7 @@ var PredictSingleAgeAndGenderTask = class extends PredictAgeAndGenderTaskBase { const parentResult = await this.parentTask; if (!parentResult) return void 0; - const {age, gender, genderProbability} = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.ageGenderNet.predictAgeAndGender(face), this.extractedFaces); + const { age, gender, genderProbability } = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.ageGenderNet.predictAgeAndGender(face), this.extractedFaces); return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age); } withFaceExpressions() { @@ -4065,7 +4098,7 @@ var ComputeFaceDescriptorsTaskBase = class extends ComposableTask { var ComputeAllFaceDescriptorsTask = class extends ComputeFaceDescriptorsTaskBase { async run() { const parentResults = await this.parentTask; - const descriptors = await extractAllFacesAndComputeResults(parentResults, this.input, (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face))), null, (parentResult) => parentResult.landmarks.align(null, {useDlibAlignment: true})); + const descriptors = await extractAllFacesAndComputeResults(parentResults, this.input, (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face))), null, (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true })); return descriptors.map((descriptor, i) => extendWithFaceDescriptor(parentResults[i], descriptor)); } withFaceExpressions() { @@ -4081,7 +4114,7 @@ var ComputeSingleFaceDescriptorTask = class extends ComputeFaceDescriptorsTaskBa if (!parentResult) { return void 0; } - const descriptor = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.faceRecognitionNet.computeFaceDescriptor(face), null, (parentResult2) => parentResult2.landmarks.align(null, {useDlibAlignment: true})); + const descriptor = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.faceRecognitionNet.computeFaceDescriptor(face), null, (parentResult2) => parentResult2.landmarks.align(null, { useDlibAlignment: true })); return extendWithFaceDescriptor(parentResult, descriptor); } 
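The age and gender tasks above extend each parent result with `age`, `gender`, and `genderProbability`. A sketch of the corresponding chained call, assuming `withAgeAndGender()` is available on the detection task and that the detector and age/gender models were loaded beforehand:

```ts
import * as faceapi from '@vladmandic/face-api';

async function logAgesAndGenders(input: HTMLImageElement) {
  const results = await faceapi.detectAllFaces(input).withAgeAndGender();
  for (const r of results) {
    // fields added by extendWithAge / extendWithGender above
    console.log(`age ${Math.round(r.age)}, ${r.gender} (${r.genderProbability.toFixed(2)})`);
  }
}
```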
withFaceExpressions() { @@ -4129,7 +4162,7 @@ var DetectSingleFaceLandmarksTask = class extends DetectFaceLandmarksTaskBase { if (!parentResult) { return void 0; } - const {detection} = parentResult; + const { detection } = parentResult; const faces = this.input instanceof tfjs_esm_exports.Tensor ? await extractFaceTensors(this.input, [detection]) : await extractFaces(this.input, [detection]); const landmarks = await this.landmarkNet.detectLandmarks(faces[0]); faces.forEach((f) => f instanceof tfjs_esm_exports.Tensor && f.dispose()); @@ -4156,7 +4189,7 @@ var DetectFacesTaskBase = class extends ComposableTask { }; var DetectAllFacesTask = class extends DetectFacesTaskBase { async run() { - const {input, options} = this; + const { input, options } = this; let result; if (options instanceof TinyFaceDetectorOptions) result = nets.tinyFaceDetector.locateFaces(input, options); @@ -4220,7 +4253,7 @@ function detectAllFaces(input, options = new SsdMobilenetv1Options()) { // src/globalApi/allFaces.ts async function allFacesSsdMobilenetv1(input, minConfidence) { - return detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? {minConfidence} : {})).withFaceLandmarks().withFaceDescriptors(); + return detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? { minConfidence } : {})).withFaceLandmarks().withFaceDescriptors(); } async function allFacesTinyYolov2(input, forwardParams = {}) { return detectAllFaces(input, new TinyYolov2Options(forwardParams)).withFaceLandmarks().withFaceDescriptors(); @@ -4269,7 +4302,7 @@ var FaceMatcher = class { return descriptors.map((d) => euclideanDistance(d, queryDescriptor)).reduce((d1, d2) => d1 + d2, 0) / (descriptors.length || 1); } matchDescriptor(queryDescriptor) { - return this.labeledDescriptors.map(({descriptors, label}) => new FaceMatch(label, this.computeMeanDistance(queryDescriptor, descriptors))).reduce((best, curr) => best.distance < curr.distance ? best : curr); + return this.labeledDescriptors.map(({ descriptors, label }) => new FaceMatch(label, this.computeMeanDistance(queryDescriptor, descriptors))).reduce((best, curr) => best.distance < curr.distance ? 
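`allFacesSsdMobilenetv1` above is a thin convenience wrapper; written out, the same pipeline is the usual detect, landmark, descriptor chain (model loading is assumed to have happened already):

```ts
import * as faceapi from '@vladmandic/face-api';

async function describeAllFaces(input: HTMLImageElement) {
  return faceapi
    .detectAllFaces(input, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.6 }))
    .withFaceLandmarks()
    .withFaceDescriptors(); // the same chain allFacesSsdMobilenetv1 builds above
}
```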
best : curr); } findBestMatch(queryDescriptor) { const bestMatch = this.matchDescriptor(queryDescriptor); @@ -4296,12 +4329,12 @@ function createTinyFaceDetector(weights) { // src/resizeResults.ts function resizeResults(results, dimensions) { - const {width, height} = new Dimensions(dimensions.width, dimensions.height); + const { width, height } = new Dimensions(dimensions.width, dimensions.height); if (width <= 0 || height <= 0) { - throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width, height})}`); + throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`); } if (Array.isArray(results)) { - return results.map((obj) => resizeResults(obj, {width, height})); + return results.map((obj) => resizeResults(obj, { width, height })); } if (isWithFaceLandmarks(results)) { const resizedDetection = results.detection.forSize(width, height); @@ -4320,7 +4353,7 @@ function resizeResults(results, dimensions) { // src/index.ts var node = typeof process !== "undefined"; var browser3 = typeof navigator !== "undefined" && typeof navigator.userAgent !== "undefined"; -var version2 = {faceapi: version, node, browser: browser3}; +var version2 = { faceapi: version, node, browser: browser3 }; export { AgeGenderNet, BoundingBox, @@ -4398,6 +4431,7 @@ export { fetchJson, fetchNetWeights, fetchOrThrow, + fetchVideo, getContext2dOrThrow, getMediaDimensions, imageTensorToCanvas, diff --git a/dist/face-api.esm-nobundle.js.map b/dist/face-api.esm-nobundle.js.map index 48c57d1..63273e5 100644 --- a/dist/face-api.esm-nobundle.js.map +++ b/dist/face-api.esm-nobundle.js.map @@ -1,7 +1,7 @@ { "version": 3, - "sources": ["../src/tfjs/tf-browser.ts", "../src/draw/index.ts", "../src/draw/drawContour.ts", "../src/utils/index.ts", "../src/classes/Dimensions.ts", "../src/classes/Point.ts", "../src/classes/Box.ts", "../src/classes/BoundingBox.ts", "../src/classes/ObjectDetection.ts", "../src/classes/FaceDetection.ts", "../src/ops/iou.ts", "../src/ops/minBbox.ts", "../src/ops/nonMaxSuppression.ts", "../src/ops/normalize.ts", "../src/ops/padToSquare.ts", "../src/ops/shuffleArray.ts", "../src/ops/index.ts", "../src/classes/Rect.ts", "../src/classes/FaceLandmarks.ts", "../src/classes/FaceLandmarks5.ts", "../src/classes/FaceLandmarks68.ts", "../src/classes/FaceMatch.ts", "../src/classes/LabeledBox.ts", "../src/classes/LabeledFaceDescriptors.ts", "../src/classes/PredictedBox.ts", "../src/factories/WithFaceDetection.ts", "../src/env/createBrowserEnv.ts", "../src/env/createFileSystem.ts", "../src/env/createNodejsEnv.ts", "../src/env/isBrowser.ts", "../src/env/isNodejs.ts", "../src/env/index.ts", "../src/dom/resolveInput.ts", "../src/dom/getContext2dOrThrow.ts", "../src/draw/DrawTextField.ts", "../src/draw/DrawBox.ts", "../src/draw/drawDetections.ts", "../src/dom/isMediaLoaded.ts", "../src/dom/awaitMediaLoaded.ts", "../src/dom/bufferToImage.ts", "../src/dom/getMediaDimensions.ts", "../src/dom/createCanvas.ts", "../src/dom/imageTensorToCanvas.ts", "../src/dom/isMediaElement.ts", "../src/dom/imageToSquare.ts", "../src/dom/NetInput.ts", "../src/dom/toNetInput.ts", "../src/dom/extractFaces.ts", "../src/dom/extractFaceTensors.ts", "../src/dom/fetchOrThrow.ts", "../src/dom/fetchImage.ts", "../src/dom/fetchJson.ts", "../src/dom/fetchNetWeights.ts", "../src/common/getModelUris.ts", "../src/dom/loadWeightMap.ts", "../src/dom/matchDimensions.ts", "../src/NeuralNetwork.ts", "../src/common/depthwiseSeparableConv.ts", "../src/faceFeatureExtractor/denseBlock.ts", "../src/common/convLayer.ts", 
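`matchDescriptor` above scores a query against each labelled set by mean Euclidean distance and keeps the closest label. A self-contained sketch; the hand-filled arrays stand in for real 128-dimensional descriptors from `withFaceDescriptors()`, and the 0.6 threshold argument is an assumed constructor default:

```ts
import * as faceapi from '@vladmandic/face-api';

const reference = new Float32Array(128).fill(0.10); // stand-in descriptor
const query = new Float32Array(128).fill(0.12);

const labeled = new faceapi.LabeledFaceDescriptors('person-a', [reference]);
const matcher = new faceapi.FaceMatcher([labeled], 0.6); // max distance for a match

const best = matcher.findBestMatch(query);
console.log(best.toString()); // "person-a (0.22)": sqrt(128 * 0.02^2) ≈ 0.226
```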
"../src/common/disposeUnusedWeightTensors.ts", "../src/common/extractConvParamsFactory.ts", "../src/common/extractFCParamsFactory.ts", "../src/common/types.ts", "../src/common/extractSeparableConvParamsFactory.ts", "../src/common/extractWeightEntryFactory.ts", "../src/common/extractWeightsFactory.ts", "../src/faceFeatureExtractor/extractorsFactory.ts", "../src/faceFeatureExtractor/extractParams.ts", "../src/common/loadConvParamsFactory.ts", "../src/faceFeatureExtractor/loadParamsFactory.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMap.ts", "../src/faceFeatureExtractor/FaceFeatureExtractor.ts", "../src/common/fullyConnectedLayer.ts", "../src/faceProcessor/extractParams.ts", "../src/faceProcessor/extractParamsFromWeightMap.ts", "../src/faceProcessor/util.ts", "../src/faceProcessor/FaceProcessor.ts", "../src/faceExpressionNet/FaceExpressions.ts", "../src/faceExpressionNet/FaceExpressionNet.ts", "../src/factories/WithFaceExpressions.ts", "../src/draw/drawFaceExpressions.ts", "../src/factories/WithFaceLandmarks.ts", "../src/draw/DrawFaceLandmarks.ts", "../src/xception/extractParams.ts", "../src/xception/extractParamsFromWeightMap.ts", "../src/xception/TinyXception.ts", "../src/ageGenderNet/extractParams.ts", "../src/ageGenderNet/extractParamsFromWeightMap.ts", "../src/ageGenderNet/types.ts", "../src/ageGenderNet/AgeGenderNet.ts", "../src/faceLandmarkNet/FaceLandmark68NetBase.ts", "../src/faceLandmarkNet/FaceLandmark68Net.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMapTiny.ts", "../src/faceFeatureExtractor/extractParamsTiny.ts", "../src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "../src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "../src/faceLandmarkNet/index.ts", "../src/faceRecognitionNet/scaleLayer.ts", "../src/faceRecognitionNet/convLayer.ts", "../src/faceRecognitionNet/extractParams.ts", "../src/faceRecognitionNet/extractParamsFromWeightMap.ts", "../src/faceRecognitionNet/residualLayer.ts", "../src/faceRecognitionNet/FaceRecognitionNet.ts", "../src/faceRecognitionNet/index.ts", "../src/factories/WithFaceDescriptor.ts", "../src/factories/WithAge.ts", "../src/factories/WithGender.ts", "../src/ssdMobilenetv1/extractParams.ts", "../src/ssdMobilenetv1/extractParamsFromWeightMap.ts", "../src/ssdMobilenetv1/pointwiseConvLayer.ts", "../src/ssdMobilenetv1/mobileNetV1.ts", "../src/ssdMobilenetv1/nonMaxSuppression.ts", "../src/ssdMobilenetv1/outputLayer.ts", "../src/ssdMobilenetv1/boxPredictionLayer.ts", "../src/ssdMobilenetv1/predictionLayer.ts", "../src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "../src/ssdMobilenetv1/SsdMobilenetv1.ts", "../src/ssdMobilenetv1/index.ts", "../src/tinyYolov2/const.ts", "../src/tinyYolov2/config.ts", "../src/tinyYolov2/leaky.ts", "../src/tinyYolov2/convWithBatchNorm.ts", "../src/tinyYolov2/depthwiseSeparableConv.ts", "../src/tinyYolov2/extractParams.ts", "../src/tinyYolov2/extractParamsFromWeightMap.ts", "../src/tinyYolov2/TinyYolov2Options.ts", "../src/tinyYolov2/TinyYolov2Base.ts", "../src/tinyYolov2/TinyYolov2.ts", "../src/tinyYolov2/index.ts", "../src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "../src/globalApi/ComposableTask.ts", "../src/globalApi/extractFacesAndComputeResults.ts", "../src/tinyFaceDetector/const.ts", "../src/tinyFaceDetector/TinyFaceDetector.ts", "../src/globalApi/nets.ts", "../src/globalApi/PredictFaceExpressionsTask.ts", "../src/globalApi/PredictAgeAndGenderTask.ts", "../src/globalApi/ComputeFaceDescriptorsTasks.ts", "../src/globalApi/DetectFaceLandmarksTasks.ts", "../src/globalApi/DetectFacesTasks.ts", 
"../src/globalApi/detectFaces.ts", "../src/globalApi/allFaces.ts", "../src/euclideanDistance.ts", "../src/globalApi/FaceMatcher.ts", "../src/tinyFaceDetector/index.ts", "../src/resizeResults.ts", "../src/index.ts"], - "sourcesContent": ["/* eslint-disable import/no-extraneous-dependencies */\n/* eslint-disable node/no-unpublished-import */\n\n// wrapper to load tfjs in a single place so version can be changed quickly\n\nexport * from '@tensorflow/tfjs/dist/index.js';\nexport * from '@tensorflow/tfjs-backend-wasm';\n", "export * from './drawContour';\nexport * from './drawDetections';\nexport * from './drawFaceExpressions';\nexport * from './DrawBox';\nexport * from './DrawFaceLandmarks';\nexport * from './DrawTextField';\n", "import { Point } from '../classes/index';\n\nexport function drawContour(\n ctx: CanvasRenderingContext2D,\n points: Point[],\n isClosed: boolean = false,\n) {\n ctx.beginPath();\n\n points.slice(1).forEach(({ x, y }, prevIdx) => {\n const from = points[prevIdx];\n ctx.moveTo(from.x, from.y);\n ctx.lineTo(x, y);\n });\n\n if (isClosed) {\n const from = points[points.length - 1];\n const to = points[0];\n if (!from || !to) {\n return;\n }\n\n ctx.moveTo(from.x, from.y);\n ctx.lineTo(to.x, to.y);\n }\n\n ctx.stroke();\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Point } from '../classes/index';\nimport { Dimensions, IDimensions } from '../classes/Dimensions';\n\nexport function isTensor(tensor: any, dim: number) {\n return tensor instanceof tf.Tensor && tensor.shape.length === dim;\n}\n\nexport function isTensor1D(tensor: any): tensor is tf.Tensor1D {\n return isTensor(tensor, 1);\n}\n\nexport function isTensor2D(tensor: any): tensor is tf.Tensor2D {\n return isTensor(tensor, 2);\n}\n\nexport function isTensor3D(tensor: any): tensor is tf.Tensor3D {\n return isTensor(tensor, 3);\n}\n\nexport function isTensor4D(tensor: any): tensor is tf.Tensor4D {\n return isTensor(tensor, 4);\n}\n\nexport function isFloat(num: number) {\n return num % 1 !== 0;\n}\n\nexport function isEven(num: number) {\n return num % 2 === 0;\n}\n\nexport function round(num: number, prec: number = 2) {\n const f = 10 ** prec;\n return Math.floor(num * f) / f;\n}\n\nexport function isDimensions(obj: any): boolean {\n return obj && obj.width && obj.height;\n}\n\nexport function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number) {\n const scale = inputSize / Math.max(height, width);\n return new Dimensions(Math.round(width * scale), Math.round(height * scale));\n}\n\nexport function getCenterPoint(pts: Point[]): Point {\n return pts.reduce((sum, pt) => sum.add(pt), new Point(0, 0))\n .div(new Point(pts.length, pts.length));\n}\n\nexport function range(num: number, start: number, step: number): number[] {\n return Array(num).fill(0).map((_, i) => start + (i * step));\n}\n\nexport function isValidNumber(num: any) {\n return !!num && (num !== Infinity) && (num !== -Infinity) && !Number.isNaN(num) || num === 0;\n}\n\nexport function isValidProbablitiy(num: any) {\n return isValidNumber(num) && num >= 0 && num <= 1.0;\n}\n", "import { isValidNumber } from '../utils/index';\n\nexport interface IDimensions {\n width: number\n height: number\n}\n\nexport class Dimensions implements IDimensions {\n private _width: number\n\n private _height: number\n\n constructor(width: number, height: number) {\n if (!isValidNumber(width) || !isValidNumber(height)) {\n throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have 
${JSON.stringify({ width, height })}`);\n }\n\n this._width = width;\n this._height = height;\n }\n\n public get width(): number { return this._width; }\n\n public get height(): number { return this._height; }\n\n public reverse(): Dimensions {\n return new Dimensions(1 / this.width, 1 / this.height);\n }\n}\n", "export interface IPoint {\n x: number\n y: number\n}\n\nexport class Point implements IPoint {\n private _x: number\n\n private _y: number\n\n constructor(x: number, y: number) {\n this._x = x;\n this._y = y;\n }\n\n get x(): number { return this._x; }\n\n get y(): number { return this._y; }\n\n public add(pt: IPoint): Point {\n return new Point(this.x + pt.x, this.y + pt.y);\n }\n\n public sub(pt: IPoint): Point {\n return new Point(this.x - pt.x, this.y - pt.y);\n }\n\n public mul(pt: IPoint): Point {\n return new Point(this.x * pt.x, this.y * pt.y);\n }\n\n public div(pt: IPoint): Point {\n return new Point(this.x / pt.x, this.y / pt.y);\n }\n\n public abs(): Point {\n return new Point(Math.abs(this.x), Math.abs(this.y));\n }\n\n public magnitude(): number {\n return Math.sqrt((this.x ** 2) + (this.y ** 2));\n }\n\n public floor(): Point {\n return new Point(Math.floor(this.x), Math.floor(this.y));\n }\n}\n", "import { isDimensions, isValidNumber } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { IDimensions } from './Dimensions';\nimport { Point } from './Point';\nimport { IRect } from './Rect';\n\nexport class Box implements IBoundingBox, IRect {\n public static isRect(rect: any): boolean {\n return !!rect && [rect.x, rect.y, rect.width, rect.height].every(isValidNumber);\n }\n\n public static assertIsValidBox(box: any, callee: string, allowNegativeDimensions: boolean = false) {\n if (!Box.isRect(box)) {\n throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`);\n }\n\n if (!allowNegativeDimensions && (box.width < 0 || box.height < 0)) {\n throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`);\n }\n }\n\n private _x: number\n\n private _y: number\n\n private _width: number\n\n private _height: number\n\n constructor(_box: IBoundingBox | IRect, allowNegativeDimensions: boolean = true) {\n const box = (_box || {}) as any;\n\n const isBbox = [box.left, box.top, box.right, box.bottom].every(isValidNumber);\n const isRect = [box.x, box.y, box.width, box.height].every(isValidNumber);\n\n if (!isRect && !isBbox) {\n throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`);\n }\n\n const [x, y, width, height] = isRect\n ? 
[box.x, box.y, box.width, box.height]\n : [box.left, box.top, box.right - box.left, box.bottom - box.top];\n\n Box.assertIsValidBox({\n x, y, width, height,\n }, 'Box.constructor', allowNegativeDimensions);\n\n this._x = x;\n this._y = y;\n this._width = width;\n this._height = height;\n }\n\n public get x(): number { return this._x; }\n\n public get y(): number { return this._y; }\n\n public get width(): number { return this._width; }\n\n public get height(): number { return this._height; }\n\n public get left(): number { return this.x; }\n\n public get top(): number { return this.y; }\n\n public get right(): number { return this.x + this.width; }\n\n public get bottom(): number { return this.y + this.height; }\n\n public get area(): number { return this.width * this.height; }\n\n public get topLeft(): Point { return new Point(this.left, this.top); }\n\n public get topRight(): Point { return new Point(this.right, this.top); }\n\n public get bottomLeft(): Point { return new Point(this.left, this.bottom); }\n\n public get bottomRight(): Point { return new Point(this.right, this.bottom); }\n\n public round(): Box {\n const [x, y, width, height] = [this.x, this.y, this.width, this.height]\n .map((val) => Math.round(val));\n return new Box({\n x, y, width, height,\n });\n }\n\n public floor(): Box {\n const [x, y, width, height] = [this.x, this.y, this.width, this.height]\n .map((val) => Math.floor(val));\n return new Box({\n x, y, width, height,\n });\n }\n\n public toSquare(): Box {\n let {\n x, y, width, height,\n } = this;\n const diff = Math.abs(width - height);\n if (width < height) {\n x -= (diff / 2);\n width += diff;\n }\n if (height < width) {\n y -= (diff / 2);\n height += diff;\n }\n\n return new Box({ x, y, width, height });\n }\n\n public rescale(s: IDimensions | number): Box {\n const scaleX = isDimensions(s) ? (s as IDimensions).width : s as number;\n const scaleY = isDimensions(s) ? 
(s as IDimensions).height : s as number;\n return new Box({\n x: this.x * scaleX,\n y: this.y * scaleY,\n width: this.width * scaleX,\n height: this.height * scaleY,\n });\n }\n\n public pad(padX: number, padY: number): Box {\n const [x, y, width, height] = [\n this.x - (padX / 2),\n this.y - (padY / 2),\n this.width + padX,\n this.height + padY,\n ];\n return new Box({\n x, y, width, height,\n });\n }\n\n public clipAtImageBorders(imgWidth: number, imgHeight: number): Box {\n const { x, y, right, bottom } = this;\n const clippedX = Math.max(x, 0);\n const clippedY = Math.max(y, 0);\n\n const newWidth = right - clippedX;\n const newHeight = bottom - clippedY;\n const clippedWidth = Math.min(newWidth, imgWidth - clippedX);\n const clippedHeight = Math.min(newHeight, imgHeight - clippedY);\n\n return (new Box({\n x: clippedX, y: clippedY, width: clippedWidth, height: clippedHeight,\n })).floor();\n }\n\n public shift(sx: number, sy: number): Box {\n const { width, height } = this;\n const x = this.x + sx;\n const y = this.y + sy;\n\n return new Box({\n x, y, width, height,\n });\n }\n\n public padAtBorders(imageHeight: number, imageWidth: number) {\n const w = this.width + 1;\n const h = this.height + 1;\n\n const dx = 1;\n const dy = 1;\n let edx = w;\n let edy = h;\n\n let x = this.left;\n let y = this.top;\n let ex = this.right;\n let ey = this.bottom;\n\n if (ex > imageWidth) {\n edx = -ex + imageWidth + w;\n ex = imageWidth;\n }\n if (ey > imageHeight) {\n edy = -ey + imageHeight + h;\n ey = imageHeight;\n }\n if (x < 1) {\n edy = 2 - x;\n x = 1;\n }\n if (y < 1) {\n edy = 2 - y;\n y = 1;\n }\n\n return {\n dy, edy, dx, edx, y, ey, x, ex, w, h,\n };\n }\n\n public calibrate(region: Box) {\n return new Box({\n left: this.left + (region.left * this.width),\n top: this.top + (region.top * this.height),\n right: this.right + (region.right * this.width),\n bottom: this.bottom + (region.bottom * this.height),\n }).toSquare().round();\n }\n}\n", "import { Box } from './Box';\n\nexport interface IBoundingBox {\n left: number\n top: number\n right: number\n bottom: number\n}\n\nexport class BoundingBox extends Box implements IBoundingBox {\n constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions: boolean = false) {\n super({\n left, top, right, bottom,\n }, allowNegativeDimensions);\n }\n}\n", "import { Box } from './Box';\nimport { Dimensions, IDimensions } from './Dimensions';\nimport { IRect, Rect } from './Rect';\n\nexport class ObjectDetection {\n private _score: number\n\n private _classScore: number\n\n private _className: string\n\n private _box: Rect\n\n private _imageDims: Dimensions\n\n constructor(\n score: number,\n classScore: number,\n className: string,\n relativeBox: IRect,\n imageDims: IDimensions,\n ) {\n this._imageDims = new Dimensions(imageDims.width, imageDims.height);\n this._score = score;\n this._classScore = classScore;\n this._className = className;\n this._box = new Box(relativeBox).rescale(this._imageDims);\n }\n\n public get score(): number { return this._score; }\n\n public get classScore(): number { return this._classScore; }\n\n public get className(): string { return this._className; }\n\n public get box(): Box { return this._box; }\n\n public get imageDims(): Dimensions { return this._imageDims; }\n\n public get imageWidth(): number { return this.imageDims.width; }\n\n public get imageHeight(): number { return this.imageDims.height; }\n\n public get relativeBox(): Box { return new 
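The `Box` methods above never mutate; each call returns a new box. A few concrete results, assuming `BoundingBox` is importable from the package root as in the export list earlier in this diff:

```ts
import * as faceapi from '@vladmandic/face-api';

const box = new faceapi.BoundingBox(10, 20, 110, 70); // left, top, right, bottom => 100x50

box.toSquare(); // { x: 10, y: -5, width: 100, height: 100 } - short side grown around its centre
box.pad(10, 10); // { x: 5, y: 15, width: 110, height: 60 } - half the padding on each side
box.rescale(2);  // { x: 20, y: 40, width: 200, height: 100 } - uniform, or per-axis via IDimensions
```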
Box(this._box).rescale(this.imageDims.reverse()); }\n\n public forSize(width: number, height: number): ObjectDetection {\n return new ObjectDetection(\n this.score,\n this.classScore,\n this.className,\n this.relativeBox,\n { width, height },\n );\n }\n}\n", "import { Box } from './Box';\nimport { IDimensions } from './Dimensions';\nimport { ObjectDetection } from './ObjectDetection';\nimport { Rect } from './Rect';\n\nexport interface IFaceDetecion {\n score: number\n box: Box\n}\n\nexport class FaceDetection extends ObjectDetection implements IFaceDetecion {\n constructor(\n score: number,\n relativeBox: Rect,\n imageDims: IDimensions,\n ) {\n super(score, score, '', relativeBox, imageDims);\n }\n\n public forSize(width: number, height: number): FaceDetection {\n const { score, relativeBox, imageDims } = super.forSize(width, height);\n return new FaceDetection(score, relativeBox, imageDims);\n }\n}\n", "import { Box } from '../classes/Box';\n\nexport function iou(box1: Box, box2: Box, isIOU: boolean = true) {\n const width = Math.max(0.0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left));\n const height = Math.max(0.0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top));\n const interSection = width * height;\n\n return isIOU\n ? interSection / (box1.area + box2.area - interSection)\n : interSection / Math.min(box1.area, box2.area);\n}\n", "import { BoundingBox, IPoint } from '../classes/index';\n\nexport function minBbox(pts: IPoint[]): BoundingBox {\n const xs = pts.map((pt) => pt.x);\n const ys = pts.map((pt) => pt.y);\n const minX = xs.reduce((min, x) => (x < min ? x : min), Infinity);\n const minY = ys.reduce((min, y) => (y < min ? y : min), Infinity);\n const maxX = xs.reduce((max, x) => (max < x ? x : max), 0);\n const maxY = ys.reduce((max, y) => (max < y ? 
y : max), 0);\n\n return new BoundingBox(minX, minY, maxX, maxY);\n}\n", "import { Box } from '../classes/Box';\nimport { iou } from './iou';\n\nexport function nonMaxSuppression(\n boxes: Box[],\n scores: number[],\n iouThreshold: number,\n isIOU: boolean = true,\n): number[] {\n let indicesSortedByScore = scores\n .map((score, boxIndex) => ({ score, boxIndex }))\n .sort((c1, c2) => c1.score - c2.score)\n .map((c) => c.boxIndex);\n\n const pick: number[] = [];\n\n while (indicesSortedByScore.length > 0) {\n const curr = indicesSortedByScore.pop() as number;\n pick.push(curr);\n\n const indices = indicesSortedByScore;\n\n const outputs: number[] = [];\n for (let i = 0; i < indices.length; i++) {\n const idx = indices[i];\n\n const currBox = boxes[curr];\n const idxBox = boxes[idx];\n\n outputs.push(iou(currBox, idxBox, isIOU));\n }\n\n indicesSortedByScore = indicesSortedByScore.filter(\n (_, j) => outputs[j] <= iouThreshold,\n );\n }\n\n return pick;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D {\n return tf.tidy(() => {\n const [r, g, b] = meanRgb;\n const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r, 'float32');\n const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g, 'float32');\n const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b, 'float32');\n const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3);\n\n return tf.sub(x, avg_rgb);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\n/**\n * Pads the smaller dimension of an image tensor with zeros, such that width === height.\n *\n * @param imgTensor The image tensor.\n * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on\n * both sides of the minor dimension oof the image.\n * @returns The padded tensor with width === height.\n */\nexport function padToSquare(\n imgTensor: tf.Tensor4D,\n isCenterImage: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const [height, width] = imgTensor.shape.slice(1);\n if (height === width) {\n return imgTensor;\n }\n\n const dimDiff = Math.abs(height - width);\n const paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1));\n const paddingAxis = height > width ? 2 : 1;\n\n const createPaddingTensor = (paddingAmountLocal: number): tf.Tensor => {\n const paddingTensorShape = imgTensor.shape.slice();\n paddingTensorShape[paddingAxis] = paddingAmountLocal;\n return tf.fill(paddingTensorShape, 0, 'float32');\n };\n\n const paddingTensorAppend = createPaddingTensor(paddingAmount);\n const remainingPaddingAmount = dimDiff - (paddingTensorAppend.shape[paddingAxis] as number);\n\n const paddingTensorPrepend = isCenterImage && remainingPaddingAmount\n ? 
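`iou` above is intersection over union (or over the smaller area when `isIOU` is false), and `nonMaxSuppression` greedily keeps the highest-scoring box, dropping any remaining box whose overlap with an already-picked one exceeds `iouThreshold`. A worked example, assuming both helpers are re-exported at the package root via the ops index:

```ts
import * as faceapi from '@vladmandic/face-api';

// two 4x4 boxes overlapping in a 2x2 region:
// intersection = 4, union = 16 + 16 - 4 = 28, IoU = 4/28 ≈ 0.143
const a = new faceapi.BoundingBox(0, 0, 4, 4);
const b = new faceapi.BoundingBox(2, 2, 6, 6);
console.log(faceapi.iou(a, b)); // ~0.142857

// with iouThreshold 0.1 the lower-scoring box is suppressed;
// the return value is the indices of the surviving boxes
console.log(faceapi.nonMaxSuppression([a, b], [0.9, 0.8], 0.1)); // [0]
```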
createPaddingTensor(remainingPaddingAmount)\n : null;\n\n const tensorsToStack = [\n paddingTensorPrepend,\n imgTensor,\n paddingTensorAppend,\n ]\n .filter((t) => !!t)\n .map((t: tf.Tensor) => tf.cast(t, 'float32')) as tf.Tensor4D[];\n return tf.concat(tensorsToStack, paddingAxis);\n });\n}\n", "export function shuffleArray(inputArray: any[]) {\n const array = inputArray.slice();\n for (let i = array.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n const x = array[i];\n array[i] = array[j];\n array[j] = x;\n }\n return array;\n}\n", "export * from './iou';\nexport * from './minBbox';\nexport * from './nonMaxSuppression';\nexport * from './normalize';\nexport * from './padToSquare';\nexport * from './shuffleArray';\n\nexport function sigmoid(x: number) {\n return 1 / (1 + Math.exp(-x));\n}\n\nexport function inverseSigmoid(x: number) {\n return Math.log(x / (1 - x));\n}\n", "import { Box } from './Box';\n\nexport interface IRect {\n x: number\n y: number\n width: number\n height: number\n}\n\nexport class Rect extends Box implements IRect {\n constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions: boolean = false) {\n super({\n x, y, width, height,\n }, allowNegativeDimensions);\n }\n}\n", "import { minBbox } from '../ops/index';\nimport { getCenterPoint } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { Box } from './Box';\nimport { Dimensions, IDimensions } from './Dimensions';\nimport { FaceDetection } from './FaceDetection';\nimport { Point } from './Point';\nimport { IRect, Rect } from './Rect';\n\n// face alignment constants\nconst relX = 0.5;\nconst relY = 0.43;\nconst relScale = 0.45;\n\nexport interface IFaceLandmarks {\n positions: Point[]\n shift: Point\n}\n\nexport class FaceLandmarks implements IFaceLandmarks {\n protected _shift: Point\n\n protected _positions: Point[]\n\n protected _imgDims: Dimensions\n\n constructor(\n relativeFaceLandmarkPositions: Point[],\n imgDims: IDimensions,\n shift: Point = new Point(0, 0),\n ) {\n const { width, height } = imgDims;\n this._imgDims = new Dimensions(width, height);\n this._shift = shift;\n this._positions = relativeFaceLandmarkPositions.map(\n (pt) => pt.mul(new Point(width, height)).add(shift),\n );\n }\n\n public get shift(): Point { return new Point(this._shift.x, this._shift.y); }\n\n public get imageWidth(): number { return this._imgDims.width; }\n\n public get imageHeight(): number { return this._imgDims.height; }\n\n public get positions(): Point[] { return this._positions; }\n\n public get relativePositions(): Point[] {\n return this._positions.map(\n (pt) => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight)),\n );\n }\n\n public forSize(width: number, height: number): T {\n return new (this.constructor as any)(\n this.relativePositions,\n { width, height },\n );\n }\n\n public shiftBy(x: number, y: number): T {\n return new (this.constructor as any)(\n this.relativePositions,\n this._imgDims,\n new Point(x, y),\n );\n }\n\n public shiftByPoint(pt: Point): T {\n return this.shiftBy(pt.x, pt.y);\n }\n\n /**\n * Aligns the face landmarks after face detection from the relative positions of the faces\n * bounding box, or it's current shift. 
This function should be used to align the face images\n * after face detection has been performed, before they are passed to the face recognition net.\n * This will make the computed face descriptor more accurate.\n *\n * @param detection (optional) The bounding box of the face or the face detection result. If\n * no argument was passed the position of the face landmarks are assumed to be relative to\n * it's current shift.\n * @returns The bounding box of the aligned face.\n */\n public align(\n detection?: FaceDetection | IRect | IBoundingBox | null,\n options: { useDlibAlignment?: boolean, minBoxPadding?: number } = { },\n ): Box {\n if (detection) {\n const box = detection instanceof FaceDetection\n ? detection.box.floor()\n : new Box(detection);\n\n return this.shiftBy(box.x, box.y).align(null, options);\n }\n\n const { useDlibAlignment, minBoxPadding } = { useDlibAlignment: false, minBoxPadding: 0.2, ...options };\n\n if (useDlibAlignment) {\n return this.alignDlib();\n }\n\n return this.alignMinBbox(minBoxPadding);\n }\n\n private alignDlib(): Box {\n const centers = this.getRefPointsForAlignment();\n\n const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers;\n const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude();\n const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;\n\n const size = Math.floor(eyeToMouthDist / relScale);\n\n const refPoint = getCenterPoint(centers);\n // TODO: pad in case rectangle is out of image bounds\n const x = Math.floor(Math.max(0, refPoint.x - (relX * size)));\n const y = Math.floor(Math.max(0, refPoint.y - (relY * size)));\n\n return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y));\n }\n\n private alignMinBbox(padding: number): Box {\n const box = minBbox(this.positions);\n return box.pad(box.width * padding, box.height * padding);\n }\n\n protected getRefPointsForAlignment(): Point[] {\n throw new Error('getRefPointsForAlignment not implemented by base class');\n }\n}\n", "import { getCenterPoint } from '../utils/index';\nimport { FaceLandmarks } from './FaceLandmarks';\nimport { Point } from './Point';\n\nexport class FaceLandmarks5 extends FaceLandmarks {\n protected getRefPointsForAlignment(): Point[] {\n const pts = this.positions;\n return [\n pts[0],\n pts[1],\n getCenterPoint([pts[3], pts[4]]),\n ];\n }\n}\n", "import { getCenterPoint } from '../utils/index';\nimport { FaceLandmarks } from './FaceLandmarks';\nimport { Point } from './Point';\n\nexport class FaceLandmarks68 extends FaceLandmarks {\n public getJawOutline(): Point[] {\n return this.positions.slice(0, 17);\n }\n\n public getLeftEyeBrow(): Point[] {\n return this.positions.slice(17, 22);\n }\n\n public getRightEyeBrow(): Point[] {\n return this.positions.slice(22, 27);\n }\n\n public getNose(): Point[] {\n return this.positions.slice(27, 36);\n }\n\n public getLeftEye(): Point[] {\n return this.positions.slice(36, 42);\n }\n\n public getRightEye(): Point[] {\n return this.positions.slice(42, 48);\n }\n\n public getMouth(): Point[] {\n return this.positions.slice(48, 68);\n }\n\n protected getRefPointsForAlignment(): Point[] {\n return [\n this.getLeftEye(),\n this.getRightEye(),\n this.getMouth(),\n ].map(getCenterPoint);\n }\n}\n", "import { round } from '../utils/index';\n\nexport interface IFaceMatch {\n label: string\n distance: number\n}\n\nexport class FaceMatch implements IFaceMatch {\n private _label: string\n\n private _distance: number\n\n constructor(label: string, distance: 
number) {\n this._label = label;\n this._distance = distance;\n }\n\n public get label(): string { return this._label; }\n\n public get distance(): number { return this._distance; }\n\n public toString(withDistance: boolean = true): string {\n return `${this.label}${withDistance ? ` (${round(this.distance)})` : ''}`;\n }\n}\n", "import { isValidNumber } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { Box } from './Box';\nimport { IRect } from './Rect';\n\nexport class LabeledBox extends Box {\n public static assertIsValidLabeledBox(box: any, callee: string) {\n Box.assertIsValidBox(box, callee);\n\n if (!isValidNumber(box.label)) {\n throw new Error(`${callee} - expected property label (${box.label}) to be a number`);\n }\n }\n\n private _label: number\n\n constructor(box: IBoundingBox | IRect | any, label: number) {\n super(box);\n this._label = label;\n }\n\n public get label(): number { return this._label; }\n}\n", "export class LabeledFaceDescriptors {\n private _label: string\n\n private _descriptors: Float32Array[]\n\n constructor(label: string, descriptors: Float32Array[]) {\n if (!(typeof label === 'string')) {\n throw new Error('LabeledFaceDescriptors - constructor expected label to be a string');\n }\n\n if (!Array.isArray(descriptors) || descriptors.some((desc) => !(desc instanceof Float32Array))) {\n throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array');\n }\n\n this._label = label;\n this._descriptors = descriptors;\n }\n\n public get label(): string { return this._label; }\n\n public get descriptors(): Float32Array[] { return this._descriptors; }\n\n public toJSON(): any {\n return {\n label: this.label,\n descriptors: this.descriptors.map((d) => Array.from(d)),\n };\n }\n\n public static fromJSON(json: any): LabeledFaceDescriptors {\n const descriptors = json.descriptors.map((d: any) => new Float32Array(d));\n return new LabeledFaceDescriptors(json.label, descriptors);\n }\n}\n", "import { isValidProbablitiy } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { LabeledBox } from './LabeledBox';\nimport { IRect } from './Rect';\n\nexport class PredictedBox extends LabeledBox {\n public static assertIsValidPredictedBox(box: any, callee: string) {\n LabeledBox.assertIsValidLabeledBox(box, callee);\n\n if (\n !isValidProbablitiy(box.score)\n || !isValidProbablitiy(box.classScore)\n ) {\n throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`);\n }\n }\n\n private _score: number\n\n private _classScore: number\n\n constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number) {\n super(box, label);\n this._score = score;\n this._classScore = classScore;\n }\n\n public get score(): number { return this._score; }\n\n public get classScore(): number { return this._classScore; }\n}\n", "import { FaceDetection } from '../classes/FaceDetection';\n\nexport type WithFaceDetection = TSource & {\n detection: FaceDetection\n}\n\nexport function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> {\n return obj.detection instanceof FaceDetection;\n}\n\nexport function extendWithFaceDetection(sourceObj: TSource, detection: FaceDetection): WithFaceDetection {\n const extension = { detection };\n return { ...sourceObj, ...extension };\n}\n", "import { Environment } from './types';\n\nexport function createBrowserEnv(): Environment {\n const fetch = window.fetch;\n if 
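`toJSON`/`fromJSON` above make labelled descriptor sets persistable: `Float32Array`s become plain arrays on the way out and are rebuilt on the way in.

```ts
import * as faceapi from '@vladmandic/face-api';

const stored = new faceapi.LabeledFaceDescriptors('person-a', [new Float32Array(128)]);
const json = JSON.stringify(stored.toJSON()); // plain { label, descriptors: number[][] }

// ...persist `json` to a file, localStorage, a database...

const restored = faceapi.LabeledFaceDescriptors.fromJSON(JSON.parse(json));
console.log(restored.label, restored.descriptors[0] instanceof Float32Array); // 'person-a' true
```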
(!fetch) throw new Error('fetch - missing fetch implementation for browser environment');\n\n const readFile = () => {\n throw new Error('readFile - filesystem not available for browser environment');\n };\n\n return {\n Canvas: HTMLCanvasElement,\n CanvasRenderingContext2D,\n Image: HTMLImageElement,\n ImageData,\n Video: HTMLVideoElement,\n createCanvasElement: () => document.createElement('canvas'),\n createImageElement: () => document.createElement('img'),\n fetch,\n readFile,\n };\n}\n", "import { FileSystem } from './types';\n\nexport function createFileSystem(fs?: any): FileSystem {\n let requireFsError = '';\n\n if (!fs) {\n try {\n // eslint-disable-next-line global-require\n fs = require('fs');\n } catch (err) {\n requireFsError = err.toString();\n }\n }\n\n const readFile = fs\n ? (filePath: string) => new Promise((resolve, reject) => {\n fs.readFile(filePath, (err: any, buffer: Buffer) => (err ? reject(err) : resolve(buffer)));\n })\n : () => {\n throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`);\n };\n\n return {\n readFile,\n };\n}\n", "/* eslint-disable max-classes-per-file */\nimport { createFileSystem } from './createFileSystem';\nimport { Environment } from './types';\n\nexport function createNodejsEnv(): Environment {\n // eslint-disable-next-line dot-notation\n const Canvas = global['Canvas'] || global.HTMLCanvasElement;\n const Image = global.Image || global.HTMLImageElement;\n\n const createCanvasElement = () => {\n if (Canvas) return new Canvas();\n throw new Error('createCanvasElement - missing Canvas implementation for nodejs environment');\n };\n\n const createImageElement = () => {\n if (Image) return new Image();\n throw new Error('createImageElement - missing Image implementation for nodejs environment');\n };\n\n const fetch = global.fetch;\n // if (!fetch) throw new Error('fetch - missing fetch implementation for nodejs environment');\n\n const fileSystem = createFileSystem();\n\n return {\n Canvas: Canvas || class {},\n CanvasRenderingContext2D: global.CanvasRenderingContext2D || class {},\n Image: Image || class {},\n ImageData: global.ImageData || class {},\n Video: global.HTMLVideoElement || class {},\n createCanvasElement,\n createImageElement,\n fetch,\n ...fileSystem,\n };\n}\n", "export function isBrowser(): boolean {\n return typeof window === 'object'\n && typeof document !== 'undefined'\n && typeof HTMLImageElement !== 'undefined'\n && typeof HTMLCanvasElement !== 'undefined'\n && typeof HTMLVideoElement !== 'undefined'\n && typeof ImageData !== 'undefined'\n && typeof CanvasRenderingContext2D !== 'undefined';\n}\n", "export function isNodejs(): boolean {\n return typeof global === 'object'\n && typeof require === 'function'\n && typeof module !== 'undefined'\n && typeof process !== 'undefined' && !!process.version;\n}\n", "import { createBrowserEnv } from './createBrowserEnv';\nimport { createFileSystem } from './createFileSystem';\nimport { createNodejsEnv } from './createNodejsEnv';\nimport { isBrowser } from './isBrowser';\nimport { isNodejs } from './isNodejs';\nimport { Environment } from './types';\n\nlet environment: Environment | null;\n\nfunction getEnv(): Environment {\n if (!environment) {\n throw new Error('getEnv - environment is not defined, check isNodejs() and isBrowser()');\n }\n return environment;\n}\n\nfunction setEnv(env: Environment) {\n environment = env;\n}\n\nfunction initialize() {\n // check for isBrowser() first to prevent electron renderer process\n // to be 
initialized with wrong environment due to isNodejs() returning true\n if (isBrowser()) return setEnv(createBrowserEnv());\n if (isNodejs()) return setEnv(createNodejsEnv());\n return null;\n}\n\nfunction monkeyPatch(env: Partial) {\n if (!environment) {\n initialize();\n }\n\n if (!environment) {\n throw new Error('monkeyPatch - environment is not defined, check isNodejs() and isBrowser()');\n }\n\n const { Canvas = environment.Canvas, Image = environment.Image } = env;\n environment.Canvas = Canvas;\n environment.Image = Image;\n environment.createCanvasElement = env.createCanvasElement || (() => new Canvas());\n environment.createImageElement = env.createImageElement || (() => new Image());\n\n environment.ImageData = env.ImageData || environment.ImageData;\n environment.Video = env.Video || environment.Video;\n environment.fetch = env.fetch || environment.fetch;\n environment.readFile = env.readFile || environment.readFile;\n}\n\nexport const env = {\n getEnv,\n setEnv,\n initialize,\n createBrowserEnv,\n createFileSystem,\n createNodejsEnv,\n monkeyPatch,\n isBrowser,\n isNodejs,\n};\n\ninitialize();\n\nexport * from './types';\n", "import { env } from '../env/index';\n\nexport function resolveInput(arg: string | any) {\n if (!env.isNodejs() && typeof arg === 'string') {\n return document.getElementById(arg);\n }\n return arg;\n}\n", "import { env } from '../env/index';\nimport { resolveInput } from './resolveInput';\n\nexport function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D {\n const { Canvas, CanvasRenderingContext2D } = env.getEnv();\n\n if (canvasArg instanceof CanvasRenderingContext2D) {\n return canvasArg;\n }\n\n const canvas = resolveInput(canvasArg);\n\n if (!(canvas instanceof Canvas)) {\n throw new Error('resolveContext2d - expected canvas to be of instance of Canvas');\n }\n\n const ctx = canvas.getContext('2d');\n if (!ctx) {\n throw new Error('resolveContext2d - canvas 2d context is null');\n }\n\n return ctx;\n}\n", "/* eslint-disable max-classes-per-file */\nimport { IDimensions, IPoint } from '../classes/index';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { resolveInput } from '../dom/resolveInput';\n\n// eslint-disable-next-line no-shadow\nexport enum AnchorPosition {\n // eslint-disable-next-line no-unused-vars\n TOP_LEFT = 'TOP_LEFT',\n // eslint-disable-next-line no-unused-vars\n TOP_RIGHT = 'TOP_RIGHT',\n // eslint-disable-next-line no-unused-vars\n BOTTOM_LEFT = 'BOTTOM_LEFT',\n // eslint-disable-next-line no-unused-vars\n BOTTOM_RIGHT = 'BOTTOM_RIGHT'\n}\n\nexport interface IDrawTextFieldOptions {\n anchorPosition?: AnchorPosition\n backgroundColor?: string\n fontColor?: string\n fontSize?: number\n fontStyle?: string\n padding?: number\n}\n\nexport class DrawTextFieldOptions implements IDrawTextFieldOptions {\n public anchorPosition: AnchorPosition\n\n public backgroundColor: string\n\n public fontColor: string\n\n public fontSize: number\n\n public fontStyle: string\n\n public padding: number\n\n constructor(options: IDrawTextFieldOptions = {}) {\n const {\n anchorPosition, backgroundColor, fontColor, fontSize, fontStyle, padding,\n } = options;\n this.anchorPosition = anchorPosition || AnchorPosition.TOP_LEFT;\n this.backgroundColor = backgroundColor || 'rgba(0, 0, 0, 0.5)';\n this.fontColor = fontColor || 'rgba(255, 255, 255, 1)';\n this.fontSize = fontSize || 14;\n this.fontStyle = fontStyle || 'Georgia';\n this.padding = padding || 4;\n }\n}\n\nexport 
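`env.monkeyPatch` above is the hook for supplying `Canvas`/`Image`/`ImageData` implementations in a Node process without DOM globals. A sketch using the `canvas` npm package; the package choice is an assumption, any API-compatible implementation works:

```ts
import * as canvas from 'canvas';
import * as faceapi from '@vladmandic/face-api';

const { Canvas, Image, ImageData } = canvas;
// casts because node-canvas types are not the DOM lib types
faceapi.env.monkeyPatch({ Canvas: Canvas as any, Image: Image as any, ImageData: ImageData as any });
```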
class DrawTextField {\n public text: string[]\n\n public anchor : IPoint\n\n public options: DrawTextFieldOptions\n\n constructor(\n text: string | string[] | DrawTextField,\n anchor: IPoint,\n options: IDrawTextFieldOptions = {},\n ) {\n // eslint-disable-next-line no-nested-ternary\n this.text = typeof text === 'string'\n ? [text]\n : (text instanceof DrawTextField ? text.text : text);\n this.anchor = anchor;\n this.options = new DrawTextFieldOptions(options);\n }\n\n measureWidth(ctx: CanvasRenderingContext2D): number {\n const { padding } = this.options;\n return this.text.map((l) => ctx.measureText(l).width).reduce((w0, w1) => (w0 < w1 ? w1 : w0), 0) + (2 * padding);\n }\n\n measureHeight(): number {\n const { fontSize, padding } = this.options;\n return this.text.length * fontSize + (2 * padding);\n }\n\n getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint {\n const { anchorPosition } = this.options;\n const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT;\n const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT;\n\n const textFieldWidth = this.measureWidth(ctx);\n const textFieldHeight = this.measureHeight();\n const x = (isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x);\n const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y;\n\n // adjust anchor if text box exceeds canvas borders\n if (canvasDims) {\n const { width, height } = canvasDims;\n const newX = Math.max(Math.min(x, width - textFieldWidth), 0);\n const newY = Math.max(Math.min(y, height - textFieldHeight), 0);\n return { x: newX, y: newY };\n }\n return { x, y };\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const canvas = resolveInput(canvasArg);\n const ctx = getContext2dOrThrow(canvas);\n\n const {\n backgroundColor, fontColor, fontSize, fontStyle, padding,\n } = this.options;\n\n ctx.font = `${fontSize}px ${fontStyle}`;\n const maxTextWidth = this.measureWidth(ctx);\n const textHeight = this.measureHeight();\n\n ctx.fillStyle = backgroundColor;\n const upperLeft = this.getUpperLeft(ctx, canvas);\n ctx.fillRect(upperLeft.x, upperLeft.y, maxTextWidth, textHeight);\n\n ctx.fillStyle = fontColor;\n this.text.forEach((textLine, i) => {\n const x = padding + upperLeft.x;\n const y = padding + upperLeft.y + ((i + 1) * fontSize);\n ctx.fillText(textLine, x, y);\n });\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { Box, IBoundingBox, IRect } from '../classes/index';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { AnchorPosition, DrawTextField, DrawTextFieldOptions, IDrawTextFieldOptions } from './DrawTextField';\n\nexport interface IDrawBoxOptions {\n boxColor?: string\n lineWidth?: number\n drawLabelOptions?: IDrawTextFieldOptions\n label?: string\n}\n\nexport class DrawBoxOptions {\n public boxColor: string\n\n public lineWidth: number\n\n public drawLabelOptions: DrawTextFieldOptions\n\n public label?: string\n\n constructor(options: IDrawBoxOptions = {}) {\n const {\n boxColor, lineWidth, label, drawLabelOptions,\n } = options;\n this.boxColor = boxColor || 'rgba(0, 0, 255, 1)';\n this.lineWidth = lineWidth || 2;\n this.label = label;\n\n const defaultDrawLabelOptions = {\n anchorPosition: AnchorPosition.BOTTOM_LEFT,\n backgroundColor: this.boxColor,\n };\n this.drawLabelOptions = new DrawTextFieldOptions({ ...defaultDrawLabelOptions, ...drawLabelOptions });\n 
}\n}\n\nexport class DrawBox {\n public box: Box\n\n public options: DrawBoxOptions\n\n constructor(\n box: IBoundingBox | IRect,\n options: IDrawBoxOptions = {},\n ) {\n this.box = new Box(box);\n this.options = new DrawBoxOptions(options);\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const ctx = getContext2dOrThrow(canvasArg);\n\n const { boxColor, lineWidth } = this.options;\n\n const {\n x, y, width, height,\n } = this.box;\n ctx.strokeStyle = boxColor;\n ctx.lineWidth = lineWidth;\n ctx.strokeRect(x, y, width, height);\n\n const { label } = this.options;\n if (label) {\n new DrawTextField([label], { x: x - (lineWidth / 2), y }, this.options.drawLabelOptions).draw(canvasArg);\n }\n }\n}\n", "import { Box, IBoundingBox, IRect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';\nimport { round } from '../utils/index';\nimport { DrawBox } from './DrawBox';\n\nexport type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>\n\nexport function drawDetections(\n canvasArg: string | HTMLCanvasElement,\n detections: TDrawDetectionsInput | Array,\n) {\n const detectionsArray = Array.isArray(detections) ? detections : [detections];\n\n detectionsArray.forEach((det) => {\n // eslint-disable-next-line no-nested-ternary\n const score = det instanceof FaceDetection\n ? det.score\n : (isWithFaceDetection(det) ? det.detection.score : undefined);\n\n // eslint-disable-next-line no-nested-ternary\n const box = det instanceof FaceDetection\n ? det.box\n : (isWithFaceDetection(det) ? det.detection.box : new Box(det));\n\n const label = score ? `${round(score)}` : undefined;\n new DrawBox(box, { label }).draw(canvasArg);\n });\n}\n", "import { env } from '../env/index';\n\nexport function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement) : boolean {\n const { Image, Video } = env.getEnv();\n\n return (media instanceof Image && media.complete)\n || (media instanceof Video && media.readyState >= 3);\n}\n", "import { env } from '../env/index';\nimport { isMediaLoaded } from './isMediaLoaded';\n\nexport function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) {\n // eslint-disable-next-line consistent-return\n return new Promise((resolve, reject) => {\n if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) return resolve(null);\n\n function onError(e: Event) {\n if (!e.currentTarget) return;\n // eslint-disable-next-line no-use-before-define\n e.currentTarget.removeEventListener('load', onLoad);\n e.currentTarget.removeEventListener('error', onError);\n reject(e);\n }\n\n function onLoad(e: Event) {\n if (!e.currentTarget) return;\n e.currentTarget.removeEventListener('load', onLoad);\n e.currentTarget.removeEventListener('error', onError);\n resolve(e);\n }\n\n media.addEventListener('load', onLoad);\n media.addEventListener('error', onError);\n });\n}\n", "import { env } from '../env/index';\n\nexport function bufferToImage(buf: Blob): Promise {\n return new Promise((resolve, reject) => {\n if (!(buf instanceof Blob)) reject(new Error('bufferToImage - expected buf to be of type: Blob'));\n const reader = new FileReader();\n reader.onload = () => {\n if (typeof reader.result !== 'string') reject(new Error('bufferToImage - expected reader.result to be a string, in onload'));\n const img = env.getEnv().createImageElement();\n img.onload = () => resolve(img);\n 
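`drawDetections` above accepts plain rects, `FaceDetection`s, or extended results, labelling each box with its rounded score. Typical overlay usage, assuming the `draw` namespace is re-exported at the package root and the canvas matches the display size:

```ts
import * as faceapi from '@vladmandic/face-api';

async function overlayDetections(input: HTMLImageElement, canvas: HTMLCanvasElement) {
  const detections = await faceapi.detectAllFaces(input);
  // rescale from the processed input dimensions to the canvas dimensions
  const resized = faceapi.resizeResults(detections, { width: canvas.width, height: canvas.height });
  faceapi.draw.drawDetections(canvas, resized);
}
```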
img.onerror = reject;\n img.src = reader.result as string;\n };\n reader.onerror = reject;\n reader.readAsDataURL(buf);\n });\n}\n", "import { Dimensions, IDimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\n\nexport function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions {\n const { Image, Video } = env.getEnv();\n\n if (input instanceof Image) {\n return new Dimensions(input.naturalWidth, input.naturalHeight);\n }\n if (input instanceof Video) {\n return new Dimensions(input.videoWidth, input.videoHeight);\n }\n return new Dimensions(input.width, input.height);\n}\n", "import { IDimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { getMediaDimensions } from './getMediaDimensions';\nimport { isMediaLoaded } from './isMediaLoaded';\n\nexport function createCanvas({ width, height }: IDimensions): HTMLCanvasElement {\n const { createCanvasElement } = env.getEnv();\n const canvas = createCanvasElement();\n canvas.width = width;\n canvas.height = height;\n return canvas;\n}\n\nexport function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement {\n const { ImageData } = env.getEnv();\n\n if (!(media instanceof ImageData) && !isMediaLoaded(media)) {\n throw new Error('createCanvasFromMedia - media has not finished loading yet');\n }\n\n const { width, height } = dims || getMediaDimensions(media);\n const canvas = createCanvas({ width, height });\n\n if (media instanceof ImageData) {\n getContext2dOrThrow(canvas).putImageData(media, 0, 0);\n } else {\n getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height);\n }\n return canvas;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { env } from '../env/index';\nimport { isTensor4D } from '../utils/index';\n\nexport async function imageTensorToCanvas(\n imgTensor: tf.Tensor,\n canvas?: HTMLCanvasElement,\n): Promise {\n const targetCanvas = canvas || env.getEnv().createCanvasElement();\n\n const [height, width, numChannels] = imgTensor.shape.slice(isTensor4D(imgTensor) ? 
1 : 0);\n const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt());\n await tf.browser.toPixels(imgTensor3D, targetCanvas);\n\n imgTensor3D.dispose();\n\n return targetCanvas;\n}\n", "import { env } from '../env/index';\n\nexport function isMediaElement(input: any) {\n const { Image, Canvas, Video } = env.getEnv();\n\n return input instanceof Image\n || input instanceof Canvas\n || input instanceof Video;\n}\n", "import { env } from '../env/index';\nimport { createCanvas, createCanvasFromMedia } from './createCanvas';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { getMediaDimensions } from './getMediaDimensions';\n\nexport function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage: boolean = false) {\n const { Image, Canvas } = env.getEnv();\n\n if (!(input instanceof Image || input instanceof Canvas)) {\n throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement');\n }\n\n if (inputSize <= 0) return createCanvas({ width: 1, height: 1 });\n const dims = getMediaDimensions(input);\n const scale = inputSize / Math.max(dims.height, dims.width);\n const width = scale * dims.width;\n const height = scale * dims.height;\n\n const targetCanvas = createCanvas({ width: inputSize, height: inputSize });\n const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input);\n\n const offset = Math.abs(width - height) / 2;\n const dx = centerImage && width < height ? offset : 0;\n const dy = centerImage && height < width ? offset : 0;\n if (inputCanvas.width > 0 && inputCanvas.height > 0) getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height);\n\n return targetCanvas;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Dimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\nimport { padToSquare } from '../ops/padToSquare';\nimport { computeReshapedDimensions, isTensor3D, isTensor4D, range } from '../utils/index';\nimport { createCanvasFromMedia } from './createCanvas';\nimport { imageToSquare } from './imageToSquare';\nimport { TResolvedNetInput } from './types';\n\nexport class NetInput {\n private _imageTensors: Array = []\n\n private _canvases: HTMLCanvasElement[] = []\n\n private _batchSize: number\n\n private _treatAsBatchInput: boolean = false\n\n private _inputDimensions: number[][] = []\n\n private _inputSize: number\n\n constructor(inputs: Array, treatAsBatchInput: boolean = false) {\n if (!Array.isArray(inputs)) {\n throw new Error(`NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have ${inputs}`);\n }\n\n this._treatAsBatchInput = treatAsBatchInput;\n this._batchSize = inputs.length;\n\n inputs.forEach((input, idx) => {\n if (isTensor3D(input)) {\n this._imageTensors[idx] = input;\n this._inputDimensions[idx] = input.shape;\n return;\n }\n\n if (isTensor4D(input)) {\n const batchSize = (input as any).shape[0];\n if (batchSize !== 1) {\n throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);\n }\n\n this._imageTensors[idx] = input;\n this._inputDimensions[idx] = (input as any).shape.slice(1);\n return;\n }\n\n const canvas = (input as any) instanceof env.getEnv().Canvas ? 
input : createCanvasFromMedia(input);\n this._canvases[idx] = canvas;\n this._inputDimensions[idx] = [canvas.height, canvas.width, 3];\n });\n }\n\n public get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D> {\n return this._imageTensors;\n }\n\n public get canvases(): HTMLCanvasElement[] {\n return this._canvases;\n }\n\n public get isBatchInput(): boolean {\n return this.batchSize > 1 || this._treatAsBatchInput;\n }\n\n public get batchSize(): number {\n return this._batchSize;\n }\n\n public get inputDimensions(): number[][] {\n return this._inputDimensions;\n }\n\n public get inputSize(): number | undefined {\n return this._inputSize;\n }\n\n public get reshapedInputDimensions(): Dimensions[] {\n return range(this.batchSize, 0, 1).map(\n (_, batchIdx) => this.getReshapedInputDimensions(batchIdx),\n );\n }\n\n public getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement {\n return this.canvases[batchIdx] || this.imageTensors[batchIdx];\n }\n\n public getInputDimensions(batchIdx: number): number[] {\n return this._inputDimensions[batchIdx];\n }\n\n public getInputHeight(batchIdx: number): number {\n return this._inputDimensions[batchIdx][0];\n }\n\n public getInputWidth(batchIdx: number): number {\n return this._inputDimensions[batchIdx][1];\n }\n\n public getReshapedInputDimensions(batchIdx: number): Dimensions {\n if (typeof this.inputSize !== 'number') {\n throw new Error('getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet');\n }\n\n const width = this.getInputWidth(batchIdx);\n const height = this.getInputHeight(batchIdx);\n return computeReshapedDimensions({ width, height }, this.inputSize);\n }\n\n /**\n * Create a batch tensor from all input canvases and tensors\n * with size [batchSize, inputSize, inputSize, 3].\n *\n * @param inputSize Height and width of the tensor.\n * @param isCenterInputs (optional, default: true) If true, add an equal amount of padding on\n * both sides of the minor dimension of the image.\n * @returns The batch tensor.\n */\n public toBatchTensor(inputSize: number, isCenterInputs: boolean = true): tf.Tensor4D {\n this._inputSize = inputSize;\n\n return tf.tidy(() => {\n const inputTensors = range(this.batchSize, 0, 1).map((batchIdx) => {\n const input = this.getInput(batchIdx);\n\n if (input instanceof tf.Tensor) {\n let imgTensor = isTensor4D(input) ? input : tf.expandDims(input);\n imgTensor = padToSquare(imgTensor, isCenterInputs);\n\n if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) {\n imgTensor = tf.image.resizeBilinear(imgTensor, [inputSize, inputSize], false, false);\n }\n\n return imgTensor.as3D(inputSize, inputSize, 3);\n }\n\n if (input instanceof env.getEnv().Canvas) {\n return tf.browser.fromPixels(imageToSquare(input, inputSize, isCenterInputs));\n }\n\n throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`);\n });\n\n const batchTensor = tf.stack(inputTensors.map((t) => tf.cast(t, 'float32'))).as4D(this.batchSize, inputSize, inputSize, 3);\n\n return batchTensor;\n });\n }\n}\n",
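// Editor's sketch (not part of the bundled sources): typical use of
// NetInput.toBatchTensor from the module above. `img` is a hypothetical,
// already-loaded HTMLImageElement; import paths mirror the layout above.
import * as tf from '../../dist/tfjs.esm';
import { toNetInput } from './toNetInput';

async function toBatchExample(img: HTMLImageElement): Promise<tf.Tensor4D> {
  const netInput = await toNetInput(img); // wraps the element; batchSize === 1
  // pads each input to a square, resizes it, and stacks the results into a
  // single [1, 112, 112, 3] float32 tensor; the caller owns (and disposes) it
  return netInput.toBatchTensor(112, true);
}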
"import { isTensor3D, isTensor4D } from '../utils/index';\nimport { awaitMediaLoaded } from './awaitMediaLoaded';\nimport { isMediaElement } from './isMediaElement';\nimport { NetInput } from './NetInput';\nimport { resolveInput } from './resolveInput';\nimport { TNetInput } from './types';\n\n/**\n * Validates the inputs to make sure they are valid net inputs and awaits all media elements\n * to finish loading.\n *\n * @param inputs The input, which can be a media element or an array of different media elements.\n * @returns A NetInput instance, which can be passed into one of the neural networks.\n */\nexport async function toNetInput(inputs: TNetInput): Promise<NetInput> {\n if (inputs instanceof NetInput) return inputs;\n const inputArgArray = Array.isArray(inputs) ? inputs : [inputs];\n if (!inputArgArray.length) throw new Error('toNetInput - empty array passed as input');\n const getIdxHint = (idx: number) => (Array.isArray(inputs) ? ` at input index ${idx}:` : '');\n const inputArray = inputArgArray.map(resolveInput);\n inputArray.forEach((input, i) => {\n if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {\n if (typeof inputArgArray[i] === 'string') throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`);\n throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);\n }\n if (isTensor4D(input)) {\n // if tf.Tensor4D is passed in the input array, the batch size has to be 1\n const batchSize = input.shape[0];\n if (batchSize !== 1) throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);\n }\n });\n // wait for all media elements to be loaded\n await Promise.all(inputArray.map((input) => isMediaElement(input) && awaitMediaLoaded(input)));\n return new NetInput(inputArray, Array.isArray(inputs));\n}\n",
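// Editor's sketch (not part of the bundled sources): toNetInput accepts a single
// input or a mixed array of element ids, media elements, and rank-3/4 tensors
// (a tf.Tensor4D entry must have batchSize 1). `video` and `canvas` are
// hypothetical, already-loaded elements.
import { toNetInput } from './toNetInput';

async function resolveExample(video: HTMLVideoElement, canvas: HTMLCanvasElement) {
  const single = await toNetInput(video); // batchSize 1, isBatchInput false
  const batched = await toNetInput([video, canvas]); // batchSize 2, isBatchInput true
  return { single, batched };
}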
"import { FaceDetection } from '../classes/FaceDetection';\nimport { Rect } from '../classes/Rect';\nimport { env } from '../env/index';\nimport { createCanvas } from './createCanvas';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { imageTensorToCanvas } from './imageTensorToCanvas';\nimport { toNetInput } from './toNetInput';\nimport { TNetInput } from './types';\n\n/**\n * Extracts the image regions containing the detected faces.\n *\n * @param input The image that face detection has been performed on.\n * @param detections The face detection results or face bounding boxes for that image.\n * @returns The Canvases of the corresponding image region for each detected face.\n */\nexport async function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]> {\n const { Canvas } = env.getEnv();\n let canvas = input as HTMLCanvasElement;\n if (!(input instanceof Canvas)) {\n const netInput = await toNetInput(input);\n if (netInput.batchSize > 1) throw new Error('extractFaces - batchSize > 1 not supported');\n const tensorOrCanvas = netInput.getInput(0);\n canvas = tensorOrCanvas instanceof Canvas ? tensorOrCanvas : await imageTensorToCanvas(tensorOrCanvas);\n }\n const ctx = getContext2dOrThrow(canvas);\n const boxes = detections\n .map((det) => (det instanceof FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det))\n .map((box) => box.clipAtImageBorders(canvas.width, canvas.height));\n return boxes.map(({ x, y, width, height }) => {\n const faceImg = createCanvas({ width, height });\n if (width > 0 && height > 0) getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0);\n return faceImg;\n });\n}\n",
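// Editor's sketch (not part of the bundled sources): cropping detected faces out
// of a source image with extractFaces above. `img` and `detections` are
// hypothetical inputs (a loaded image plus detector results for that image).
import { FaceDetection } from '../classes/FaceDetection';
import { extractFaces } from './extractFaces';

async function cropFaces(img: HTMLImageElement, detections: FaceDetection[]) {
  // one canvas per detection, with boxes clipped to the image borders
  const faceCanvases = await extractFaces(img, detections);
  return faceCanvases;
}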
"import * as tf from '../../dist/tfjs.esm';\n\nimport { Rect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { isTensor3D, isTensor4D } from '../utils/index';\n\n/**\n * Extracts the tensors of the image regions containing the detected faces.\n * Useful if you want to compute the face descriptors for the face images.\n * Using this method is faster than extracting a canvas for each face and\n * converting them to tensors individually.\n *\n * @param imageTensor The image tensor that face detection has been performed on.\n * @param detections The face detection results or face bounding boxes for that image.\n * @returns Tensors of the corresponding image region for each detected face.\n */\nexport async function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]> {\n if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) {\n throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D');\n }\n\n if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {\n throw new Error('extractFaceTensors - batchSize > 1 not supported');\n }\n\n return tf.tidy(() => {\n const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0);\n\n const boxes = detections\n .map((det) => (det instanceof FaceDetection\n ? det.forSize(imgWidth, imgHeight).box\n : det))\n .map((box) => box.clipAtImageBorders(imgWidth, imgHeight));\n\n const faceTensors = boxes.map(({\n x, y, width, height,\n }) => tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels]));\n\n return faceTensors;\n });\n}\n", "import { env } from '../env/index';\n\nexport async function fetchOrThrow(\n url: string,\n // eslint-disable-next-line no-undef\n init?: RequestInit,\n): Promise<Response> {\n const { fetch } = env.getEnv();\n const res = await fetch(url, init);\n if (!(res.status < 400)) {\n throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`);\n }\n return res;\n}\n", "import { bufferToImage } from './bufferToImage';\nimport { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchImage(uri: string): Promise<HTMLImageElement> {\n const res = await fetchOrThrow(uri);\n const blob = await (res).blob();\n\n if (!blob.type.startsWith('image/')) {\n throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`);\n }\n return bufferToImage(blob);\n}\n", "import { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchJson<T>(uri: string): Promise<T> {\n return (await fetchOrThrow(uri)).json();\n}\n", "import { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchNetWeights(uri: string): Promise<Float32Array> {\n return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer());\n}\n", "export function getModelUris(uri: string | undefined, defaultModelName: string) {\n const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`;\n\n if (!uri) {\n return {\n modelBaseUri: '',\n manifestUri: defaultManifestFilename,\n };\n }\n\n if (uri === '/') {\n return {\n modelBaseUri: '/',\n manifestUri: `/${defaultManifestFilename}`,\n };\n }\n // eslint-disable-next-line no-nested-ternary\n const protocol = uri.startsWith('http://') ? 'http://' : uri.startsWith('https://') ? 'https://' : '';\n uri = uri.replace(protocol, '');\n\n const parts = uri.split('/').filter((s) => s);\n\n const manifestFile = uri.endsWith('.json')\n ? parts[parts.length - 1]\n : defaultManifestFilename;\n\n let modelBaseUri = protocol + (uri.endsWith('.json') ? parts.slice(0, parts.length - 1) : parts).join('/');\n modelBaseUri = uri.startsWith('/') ? `/${modelBaseUri}` : modelBaseUri;\n\n return {\n modelBaseUri,\n manifestUri: modelBaseUri === '/' ? `/${manifestFile}` : `${modelBaseUri}/${manifestFile}`,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { getModelUris } from '../common/getModelUris';\nimport { fetchJson } from './fetchJson';\n\nexport async function loadWeightMap(\n uri: string | undefined,\n defaultModelName: string,\n): Promise<tf.NamedTensorMap> {\n const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName);\n const manifest = await fetchJson(manifestUri);\n // if (manifest['weightsManifest']) manifest = manifest['weightsManifest'];\n return tf.io.loadWeights(manifest, modelBaseUri);\n}\n", "import { IDimensions } from '../classes/index';\nimport { getMediaDimensions } from './getMediaDimensions';\n\nexport function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions: boolean = false) {\n const { width, height } = useMediaDimensions\n ? 
getMediaDimensions(reference)\n : reference;\n input.width = width;\n input.height = height;\n return { width, height };\n}\n", "import * as tf from '../dist/tfjs.esm';\n\nimport { ParamMapping } from './common/index';\nimport { getModelUris } from './common/getModelUris';\nimport { loadWeightMap } from './dom/index';\nimport { env } from './env/index';\n\nexport abstract class NeuralNetwork {\n constructor(name: string) {\n this._name = name;\n }\n\n protected _params: TNetParams | undefined = undefined\n\n protected _paramMappings: ParamMapping[] = []\n\n public _name: any;\n\n public get params(): TNetParams | undefined { return this._params; }\n\n public get paramMappings(): ParamMapping[] { return this._paramMappings; }\n\n public get isLoaded(): boolean { return !!this.params; }\n\n public getParamFromPath(paramPath: string): tf.Tensor {\n const { obj, objProp } = this.traversePropertyPath(paramPath);\n return obj[objProp];\n }\n\n public reassignParamFromPath(paramPath: string, tensor: tf.Tensor) {\n const { obj, objProp } = this.traversePropertyPath(paramPath);\n obj[objProp].dispose();\n obj[objProp] = tensor;\n }\n\n public getParamList() {\n return this._paramMappings.map(({ paramPath }) => ({\n path: paramPath,\n tensor: this.getParamFromPath(paramPath),\n }));\n }\n\n public getTrainableParams() {\n return this.getParamList().filter((param) => param.tensor instanceof tf.Variable);\n }\n\n public getFrozenParams() {\n return this.getParamList().filter((param) => !(param.tensor instanceof tf.Variable));\n }\n\n public variable() {\n this.getFrozenParams().forEach(({ path, tensor }) => {\n this.reassignParamFromPath(path, tensor.variable());\n });\n }\n\n public freeze() {\n this.getTrainableParams().forEach(({ path, tensor: variable }) => {\n const tensor = tf.tensor(variable.dataSync());\n variable.dispose();\n this.reassignParamFromPath(path, tensor);\n });\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.getParamList().forEach((param) => {\n if (throwOnRedispose && param.tensor.isDisposed) {\n throw new Error(`param tensor has already been disposed for path ${param.path}`);\n }\n param.tensor.dispose();\n });\n this._params = undefined;\n }\n\n public serializeParams(): Float32Array {\n return new Float32Array(\n this.getParamList()\n .map(({ tensor }) => Array.from(tensor.dataSync()) as number[])\n .reduce((flat, arr) => flat.concat(arr)),\n );\n }\n\n public async load(weightsOrUrl: Float32Array | string | undefined): Promise {\n if (weightsOrUrl instanceof Float32Array) {\n this.extractWeights(weightsOrUrl);\n return;\n }\n await this.loadFromUri(weightsOrUrl);\n }\n\n public async loadFromUri(uri: string | undefined) {\n if (uri && typeof uri !== 'string') {\n throw new Error(`${this._name}.loadFromUri - expected model uri`);\n }\n const weightMap = await loadWeightMap(uri, this.getDefaultModelName());\n this.loadFromWeightMap(weightMap);\n }\n\n public async loadFromDisk(filePath: string | undefined) {\n if (filePath && typeof filePath !== 'string') {\n throw new Error(`${this._name}.loadFromDisk - expected model file path`);\n }\n const { readFile } = env.getEnv();\n const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName());\n const fetchWeightsFromDisk = (filePaths: string[]) => Promise.all(filePaths.map((fp) => readFile(fp).then((buf) => buf.buffer)));\n const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk);\n const manifest = JSON.parse((await readFile(manifestUri)).toString());\n const weightMap = await 
loadWeights(manifest, modelBaseUri);\n this.loadFromWeightMap(weightMap);\n }\n\n public loadFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { paramMappings, params } = this.extractParamsFromWeightMap(weightMap);\n this._paramMappings = paramMappings;\n this._params = params;\n }\n\n public extractWeights(weights: Float32Array) {\n const { paramMappings, params } = this.extractParams(weights);\n this._paramMappings = paramMappings;\n this._params = params;\n }\n\n private traversePropertyPath(paramPath: string) {\n if (!this.params) {\n throw new Error('traversePropertyPath - model has no loaded params');\n }\n\n const result = paramPath.split('/').reduce((res: { nextObj: any, obj?: any, objProp?: string }, objProp) => {\n // eslint-disable-next-line no-prototype-builtins\n if (!res.nextObj.hasOwnProperty(objProp)) {\n throw new Error(`traversePropertyPath - object does not have property ${objProp}, for path ${paramPath}`);\n }\n return { obj: res.nextObj, objProp, nextObj: res.nextObj[objProp] };\n }, { nextObj: this.params });\n\n const { obj, objProp } = result;\n if (!obj || !objProp || !(obj[objProp] instanceof tf.Tensor)) {\n throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`);\n }\n\n return { obj, objProp };\n }\n\n protected abstract getDefaultModelName(): string\n\n // eslint-disable-next-line no-unused-vars\n protected abstract extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TNetParams, paramMappings: ParamMapping[] }\n\n // eslint-disable-next-line no-unused-vars\n protected abstract extractParams(weights: Float32Array): { params: TNetParams, paramMappings: ParamMapping[] }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { SeparableConvParams } from './types';\n\nexport function depthwiseSeparableConv(\n x: tf.Tensor4D,\n params: SeparableConvParams,\n stride: [number, number],\n): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, 'same');\n out = tf.add(out, params.bias);\n return out;\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, SeparableConvParams } from '../common/index';\nimport { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function denseBlock3(\n x: tf.Tensor4D,\n denseBlockParams: DenseBlock3Params,\n isFirstLayer: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out1 = tf.relu(\n isFirstLayer\n ? tf.add(\n tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),\n denseBlockParams.conv0.bias,\n )\n : depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2]),\n ) as tf.Tensor4D;\n const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);\n\n const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D;\n const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]);\n\n return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D;\n });\n}\n\nexport function denseBlock4(\n x: tf.Tensor4D,\n denseBlockParams: DenseBlock4Params,\n isFirstLayer: boolean = false,\n isScaleDown: boolean = true,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out1 = tf.relu(\n isFirstLayer\n ? tf.add(\n tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, isScaleDown ? 
[2, 2] : [1, 1], 'same'),\n denseBlockParams.conv0.bias,\n )\n : depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, isScaleDown ? [2, 2] : [1, 1]),\n ) as tf.Tensor4D;\n const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);\n\n const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D;\n const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]);\n\n const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D;\n const out4 = depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1]);\n\n return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))) as tf.Tensor4D;\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from './types';\n\nexport function convLayer(\n x: tf.Tensor4D,\n params: ConvParams,\n padding: 'valid' | 'same' = 'same',\n withRelu: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out = tf.add(\n tf.conv2d(x, params.filters, [1, 1], padding),\n params.bias,\n ) as tf.Tensor4D;\n\n return withRelu ? tf.relu(out) : out;\n });\n}\n", "import { ParamMapping } from './types';\n\nexport function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]) {\n Object.keys(weightMap).forEach((path) => {\n if (!paramMappings.some((pm) => pm.originalPath === path)) {\n weightMap[path].dispose();\n }\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, ExtractWeightsFunction, ParamMapping } from './types';\n\nexport function extractConvParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvParams => {\n const filters = tf.tensor4d(\n extractWeights(channelsIn * channelsOut * filterSize * filterSize),\n [filterSize, filterSize, channelsIn, channelsOut],\n );\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return { filters, bias };\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, FCParams, ParamMapping } from './types';\n\nexport function extractFCParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (\n channelsIn: number,\n channelsOut: number,\n mappedPrefix: string,\n ): FCParams => {\n const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]);\n const fc_bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/weights` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return {\n weights: fc_weights,\n bias: fc_bias,\n };\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\n// eslint-disable-next-line no-unused-vars\nexport type ExtractWeightsFunction = (numWeights: number) => Float32Array\n\nexport type ParamMapping = {\n originalPath?: string\n paramPath: string\n}\n\nexport type ConvParams = {\n filters: tf.Tensor4D\n bias: tf.Tensor1D\n}\n\nexport type FCParams = {\n weights: tf.Tensor2D\n bias: tf.Tensor1D\n}\n\nexport class SeparableConvParams {\n // eslint-disable-next-line no-useless-constructor\n constructor(\n // eslint-disable-next-line no-unused-vars\n public depthwise_filter: tf.Tensor4D,\n // eslint-disable-next-line no-unused-vars\n public pointwise_filter: tf.Tensor4D,\n // eslint-disable-next-line no-unused-vars\n public bias: tf.Tensor1D,\n // 
eslint-disable-next-line no-empty-function\n ) {}\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from './types';\n\nexport function extractSeparableConvParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams => {\n const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]);\n const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]);\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/depthwise_filter` },\n { paramPath: `${mappedPrefix}/pointwise_filter` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return new SeparableConvParams(\n depthwise_filter,\n pointwise_filter,\n bias,\n );\n };\n}\n\nexport function loadSeparableConvParamsFactory(\n // eslint-disable-next-line no-unused-vars\n extractWeightEntry: (originalPath: string, paramRank: number) => T,\n) {\n return (prefix: string): SeparableConvParams => {\n const depthwise_filter = extractWeightEntry(`${prefix}/depthwise_filter`, 4);\n const pointwise_filter = extractWeightEntry(`${prefix}/pointwise_filter`, 4);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n\n return new SeparableConvParams(\n depthwise_filter,\n pointwise_filter,\n bias,\n );\n };\n}\n", "import { isTensor } from '../utils/index';\nimport { ParamMapping } from './types';\n\nexport function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]) {\n return (originalPath: string, paramRank: number, mappedPath?: string) => {\n const tensor = weightMap[originalPath];\n\n if (!isTensor(tensor, paramRank)) {\n throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor}`);\n }\n\n paramMappings.push(\n { originalPath, paramPath: mappedPath || originalPath },\n );\n\n return tensor;\n };\n}\n", "export function extractWeightsFactory(weights: Float32Array) {\n let remainingWeights = weights;\n\n function extractWeights(numWeights: number): Float32Array {\n const ret = remainingWeights.slice(0, numWeights);\n remainingWeights = remainingWeights.slice(numWeights);\n return ret;\n }\n\n function getRemainingWeights(): Float32Array {\n return remainingWeights;\n }\n\n return {\n extractWeights,\n getRemainingWeights,\n };\n}\n", "import { extractConvParamsFactory, extractSeparableConvParamsFactory, ExtractWeightsFunction, ParamMapping } from '../common/index';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {\n const conv0 = isFirstLayer\n ? 
extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`)\n : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`);\n const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`);\n const conv2 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`);\n\n return { conv0, conv1, conv2 };\n }\n\n function extractDenseBlock4Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock4Params {\n const { conv0, conv1, conv2 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer);\n const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`);\n\n return {\n conv0, conv1, conv2, conv3,\n };\n }\n\n return {\n extractDenseBlock3Params,\n extractDenseBlock4Params,\n };\n}\n", "import { extractWeightsFactory, ParamMapping } from '../common/index';\nimport { extractorsFactory } from './extractorsFactory';\nimport { FaceFeatureExtractorParams } from './types';\n\nexport function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractDenseBlock4Params,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const dense0 = extractDenseBlock4Params(3, 32, 'dense0', true);\n const dense1 = extractDenseBlock4Params(32, 64, 'dense1');\n const dense2 = extractDenseBlock4Params(64, 128, 'dense2');\n const dense3 = extractDenseBlock4Params(128, 256, 'dense3');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: {\n dense0, dense1, dense2, dense3,\n },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from './types';\n\n// eslint-disable-next-line no-unused-vars\nexport function loadConvParamsFactory(extractWeightEntry: (originalPath: string, paramRank: number) => T) {\n return (prefix: string): ConvParams => {\n const filters = extractWeightEntry(`${prefix}/filters`, 4);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n\n return { filters, bias };\n };\n}\n", "import { extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common/index';\nimport { loadConvParamsFactory } from '../common/loadConvParamsFactory';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n const extractConvParams = loadConvParamsFactory(extractWeightEntry);\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n\n function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params {\n const conv0 = isFirstLayer\n ? extractConvParams(`${prefix}/conv0`)\n : extractSeparableConvParams(`${prefix}/conv0`);\n const conv1 = extractSeparableConvParams(`${prefix}/conv1`);\n const conv2 = extractSeparableConvParams(`${prefix}/conv2`);\n\n return { conv0, conv1, conv2 };\n }\n\n function extractDenseBlock4Params(prefix: string, isFirstLayer: boolean = false): DenseBlock4Params {\n const conv0 = isFirstLayer\n ? 
extractConvParams(`${prefix}/conv0`)\n : extractSeparableConvParams(`${prefix}/conv0`);\n const conv1 = extractSeparableConvParams(`${prefix}/conv1`);\n const conv2 = extractSeparableConvParams(`${prefix}/conv2`);\n const conv3 = extractSeparableConvParams(`${prefix}/conv3`);\n\n return {\n conv0, conv1, conv2, conv3,\n };\n }\n\n return {\n extractDenseBlock3Params,\n extractDenseBlock4Params,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, ParamMapping } from '../common/index';\nimport { loadParamsFactory } from './loadParamsFactory';\nimport { FaceFeatureExtractorParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractDenseBlock4Params,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const params = {\n dense0: extractDenseBlock4Params('dense0', true),\n dense1: extractDenseBlock4Params('dense1'),\n dense2: extractDenseBlock4Params('dense2'),\n dense3: extractDenseBlock4Params('dense3'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { denseBlock4 } from './denseBlock';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { FaceFeatureExtractorParams, IFaceFeatureExtractor } from './types';\n\nexport class FaceFeatureExtractor extends NeuralNetwork implements IFaceFeatureExtractor {\n constructor() {\n super('FaceFeatureExtractor');\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('FaceFeatureExtractor - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n\n let out = denseBlock4(normalized, params.dense0, true);\n out = denseBlock4(out, params.dense1);\n out = denseBlock4(out, params.dense2);\n out = denseBlock4(out, params.dense3);\n out = tf.avgPool(out, [7, 7], [2, 2], 'valid');\n\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'face_feature_extractor_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FCParams } from './types';\n\nexport function fullyConnectedLayer(\n x: tf.Tensor2D,\n params: FCParams,\n): tf.Tensor2D {\n return tf.tidy(() => tf.add(\n tf.matMul(x, params.weights),\n params.bias,\n ));\n}\n", "import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n 
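// Editor's note (added comment, not library source): extractWeightsFactory above
// hands out consecutive slices of the flat Float32Array. The fully connected
// layer below consumes channelsIn * channelsOut weight values plus channelsOut
// bias values, so any values left over afterwards indicate a malformed blob.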
const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings);\n\n const fc = extractFCParams(channelsIn, channelsOut, 'fc');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { fc },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractFcParams(prefix: string): FCParams {\n const weights = extractWeightEntry(`${prefix}/weights`, 2);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n return { weights, bias };\n }\n\n const params = {\n fc: extractFcParams('fc'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function seperateWeightMaps(weightMap: tf.NamedTensorMap) {\n const featureExtractorMap: tf.NamedTensorMap = {};\n const classifierMap: tf.NamedTensorMap = {};\n\n Object.keys(weightMap).forEach((key) => {\n const map = key.startsWith('fc') ? classifierMap : featureExtractorMap;\n map[key] = weightMap[key];\n });\n\n return { featureExtractorMap, classifierMap };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { fullyConnectedLayer } from '../common/fullyConnectedLayer';\nimport { NetInput } from '../dom/index';\nimport { FaceFeatureExtractorParams, IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { NetParams } from './types';\nimport { seperateWeightMaps } from './util';\n\nexport abstract class FaceProcessor<\n TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams\n>\n extends NeuralNetwork<NetParams> {\n protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>\n\n constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>) {\n super(_name);\n this._faceFeatureExtractor = faceFeatureExtractor;\n }\n\n public get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams> {\n return this._faceFeatureExtractor;\n }\n\n protected abstract getDefaultModelName(): string\n\n protected abstract getClassifierChannelsIn(): number\n\n protected abstract getClassifierChannelsOut(): number\n\n public runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D {\n const { params } = this;\n\n if (!params) {\n throw new Error(`${this._name} - load model before inference`);\n }\n\n return tf.tidy(() => {\n const bottleneckFeatures = input instanceof NetInput\n ? 
this.faceFeatureExtractor.forwardInput(input)\n : input;\n return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0], -1), params.fc);\n });\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.faceFeatureExtractor.dispose(throwOnRedispose);\n super.dispose(throwOnRedispose);\n }\n\n public loadClassifierParams(weights: Float32Array) {\n const { params, paramMappings } = this.extractClassifierParams(weights);\n this._params = params;\n this._paramMappings = paramMappings;\n }\n\n public extractClassifierParams(weights: Float32Array) {\n return extractParams(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut());\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap);\n\n this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap);\n\n return extractParamsFromWeightMap(classifierMap);\n }\n\n protected extractParams(weights: Float32Array) {\n const cIn = this.getClassifierChannelsIn();\n const cOut = this.getClassifierChannelsOut();\n const classifierWeightSize = (cOut * cIn) + cOut;\n\n const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize);\n const classifierWeights = weights.slice(weights.length - classifierWeightSize);\n\n this.faceFeatureExtractor.extractWeights(featureExtractorWeights);\n return this.extractClassifierParams(classifierWeights);\n }\n}\n", "export const FACE_EXPRESSION_LABELS = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised'];\n\nexport class FaceExpressions {\n public neutral: number\n\n public happy: number\n\n public sad: number\n\n public angry: number\n\n public fearful: number\n\n public disgusted: number\n\n public surprised: number\n\n constructor(probabilities: number[] | Float32Array) {\n if (probabilities.length !== 7) {\n throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`);\n }\n\n FACE_EXPRESSION_LABELS.forEach((expression, idx) => {\n this[expression] = probabilities[idx];\n });\n }\n\n asSortedArray() {\n return FACE_EXPRESSION_LABELS\n .map((expression) => ({ expression, probability: this[expression] as number }))\n .sort((e0, e1) => e1.probability - e0.probability);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';\nimport { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceProcessor } from '../faceProcessor/FaceProcessor';\nimport { FaceExpressions } from './FaceExpressions';\n\nexport class FaceExpressionNet extends FaceProcessor {\n constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {\n super('FaceExpressionNet', faceFeatureExtractor);\n }\n\n public forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D {\n return tf.tidy(() => tf.softmax(this.runNet(input)));\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async predictExpressions(input: TNetInput) {\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput);\n const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async (t) => {\n const data = t.dataSync();\n t.dispose();\n return data;\n }));\n out.dispose();\n\n const predictionsByBatch = probabilitesByBatch\n 
.map((probabilites) => new FaceExpressions(probabilites as Float32Array));\n\n return netInput.isBatchInput\n ? predictionsByBatch\n : predictionsByBatch[0];\n }\n\n protected getDefaultModelName(): string {\n return 'face_expression_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 256;\n }\n\n protected getClassifierChannelsOut(): number {\n return 7;\n }\n}\n", "import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\n\nexport type WithFaceExpressions = TSource & { expressions: FaceExpressions }\n\nexport function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}> {\n return obj.expressions instanceof FaceExpressions;\n}\n\nexport function extendWithFaceExpressions(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions {\n const extension = { expressions };\n return { ...sourceObj, ...extension };\n}\n", "import { IPoint, Point } from '../classes/index';\nimport { FaceExpressions } from '../faceExpressionNet/index';\nimport { isWithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';\nimport { round } from '../utils/index';\nimport { DrawTextField } from './DrawTextField';\n\nexport type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>\n\nexport function drawFaceExpressions(\n canvasArg: string | HTMLCanvasElement,\n faceExpressions: DrawFaceExpressionsInput | Array,\n minConfidence = 0.1,\n textFieldAnchor?: IPoint,\n) {\n const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions];\n\n faceExpressionsArray.forEach((e) => {\n // eslint-disable-next-line no-nested-ternary\n const expr = e instanceof FaceExpressions\n ? e\n : (isWithFaceExpressions(e) ? e.expressions : undefined);\n if (!expr) {\n throw new Error('drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof');\n }\n\n const sorted = expr.asSortedArray();\n const resultsToDisplay = sorted.filter((exprLocal) => exprLocal.probability > minConfidence);\n\n const anchor = isWithFaceDetection(e)\n ? 
e.detection.box.bottomLeft\n : (textFieldAnchor || new Point(0, 0));\n\n const drawTextField = new DrawTextField(\n resultsToDisplay.map((exprLocal) => `${exprLocal.expression} (${round(exprLocal.probability)})`),\n anchor,\n );\n drawTextField.draw(canvasArg);\n });\n}\n", "import { FaceDetection } from '../classes/FaceDetection';\nimport { FaceLandmarks } from '../classes/FaceLandmarks';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { isWithFaceDetection, WithFaceDetection } from './WithFaceDetection';\n\nexport type WithFaceLandmarks<\n TSource extends WithFaceDetection<{}>,\n TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 > = TSource & {\n landmarks: TFaceLandmarks,\n unshiftedLandmarks: TFaceLandmarks,\n alignedRect: FaceDetection,\n angle: { roll: number | undefined, pitch: number | undefined, yaw: number | undefined },\n }\n\nexport function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks, FaceLandmarks> {\n return isWithFaceDetection(obj)\n // eslint-disable-next-line dot-notation\n && obj['landmarks'] instanceof FaceLandmarks\n // eslint-disable-next-line dot-notation\n && obj['unshiftedLandmarks'] instanceof FaceLandmarks\n // eslint-disable-next-line dot-notation\n && obj['alignedRect'] instanceof FaceDetection;\n}\n\nfunction calculateFaceAngle(mesh) {\n // returns the angle in the plane (in radians) between the positive x-axis and the ray from (0,0) to the point (x,y)\n const radians = (a1, a2, b1, b2) => (Math.atan2(b2 - a2, b1 - a1) % Math.PI);\n // convert radians to degrees\n // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars\n const degrees = (theta) => (theta * 180) / Math.PI;\n\n const angle = { roll: undefined, pitch: undefined, yaw: undefined };\n\n if (!mesh || !mesh._positions || mesh._positions.length !== 68) return angle;\n const pt = mesh._positions;\n\n // values are in radians in range of -pi/2 to pi/2 which is -90 to +90 degrees\n // value of 0 means center\n\n // roll is face lean from left to right\n // comparing x,y of outside corners of leftEye and rightEye\n angle.roll = -radians(pt[36]._x, pt[36]._y, pt[45]._x, pt[45]._y);\n\n // pitch is face turn from left right\n // comparing x distance of top of nose to left and right edge of face\n // precision is lacking since coordinates are not precise enough\n angle.pitch = radians(0, Math.abs(pt[0]._x - pt[30]._x) / pt[30]._x, Math.PI, Math.abs(pt[16]._x - pt[30]._x) / pt[30]._x);\n\n // yaw is face move from up to down\n // comparing size of the box around the face with top and bottom of detected landmarks\n // silly hack, but this gives us face compression on y-axis\n // e.g., tilting head up hides the forehead that doesn't have any landmarks so ratio drops\n const bottom = pt.reduce((prev, cur) => (prev < cur._y ? prev : cur._y), +Infinity);\n const top = pt.reduce((prev, cur) => (prev > cur._y ? 
prev : cur._y), -Infinity);\n angle.yaw = Math.PI * (mesh._imgDims._height / (top - bottom) / 1.40 - 1);\n\n return angle;\n}\n\nexport function extendWithFaceLandmarks<\n TSource extends WithFaceDetection<{}>,\n TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 >(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks {\n const { box: shift } = sourceObj.detection;\n const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y);\n\n const rect = landmarks.align();\n const { imageDims } = sourceObj.detection;\n const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims);\n const angle = calculateFaceAngle(unshiftedLandmarks);\n\n const extension = {\n landmarks,\n unshiftedLandmarks,\n alignedRect,\n angle,\n };\n\n return { ...sourceObj, ...extension };\n}\n", "/* eslint-disable max-classes-per-file */\nimport { IPoint } from '../classes/index';\nimport { FaceLandmarks } from '../classes/FaceLandmarks';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { drawContour } from './drawContour';\n\nexport interface IDrawFaceLandmarksOptions {\n drawLines?: boolean\n drawPoints?: boolean\n lineWidth?: number\n pointSize?: number\n lineColor?: string\n pointColor?: string\n}\n\nexport class DrawFaceLandmarksOptions {\n public drawLines: boolean\n\n public drawPoints: boolean\n\n public lineWidth: number\n\n public pointSize: number\n\n public lineColor: string\n\n public pointColor: string\n\n constructor(options: IDrawFaceLandmarksOptions = {}) {\n const {\n drawLines = true, drawPoints = true, lineWidth, lineColor, pointSize, pointColor,\n } = options;\n this.drawLines = drawLines;\n this.drawPoints = drawPoints;\n this.lineWidth = lineWidth || 1;\n this.pointSize = pointSize || 2;\n this.lineColor = lineColor || 'rgba(0, 255, 255, 1)';\n this.pointColor = pointColor || 'rgba(255, 0, 255, 1)';\n }\n}\n\nexport class DrawFaceLandmarks {\n public faceLandmarks: FaceLandmarks\n\n public options: DrawFaceLandmarksOptions\n\n constructor(\n faceLandmarks: FaceLandmarks,\n options: IDrawFaceLandmarksOptions = {},\n ) {\n this.faceLandmarks = faceLandmarks;\n this.options = new DrawFaceLandmarksOptions(options);\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const ctx = getContext2dOrThrow(canvasArg);\n\n const {\n drawLines, drawPoints, lineWidth, lineColor, pointSize, pointColor,\n } = this.options;\n\n if (drawLines && this.faceLandmarks instanceof FaceLandmarks68) {\n ctx.strokeStyle = lineColor;\n ctx.lineWidth = lineWidth;\n drawContour(ctx, this.faceLandmarks.getJawOutline());\n drawContour(ctx, this.faceLandmarks.getLeftEyeBrow());\n drawContour(ctx, this.faceLandmarks.getRightEyeBrow());\n drawContour(ctx, this.faceLandmarks.getNose());\n drawContour(ctx, this.faceLandmarks.getLeftEye(), true);\n drawContour(ctx, this.faceLandmarks.getRightEye(), true);\n drawContour(ctx, this.faceLandmarks.getMouth(), true);\n }\n\n if (drawPoints) {\n ctx.strokeStyle = pointColor;\n ctx.fillStyle = pointColor;\n\n const drawPoint = (pt: IPoint) => {\n ctx.beginPath();\n ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI);\n ctx.fill();\n };\n this.faceLandmarks.positions.forEach(drawPoint);\n }\n }\n}\n\nexport type DrawFaceLandmarksInput = FaceLandmarks | 
WithFaceLandmarks>\n\nexport function drawFaceLandmarks(\n canvasArg: string | HTMLCanvasElement,\n faceLandmarks: DrawFaceLandmarksInput | Array,\n) {\n const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks];\n faceLandmarksArray.forEach((f) => {\n // eslint-disable-next-line no-nested-ternary\n const landmarks = f instanceof FaceLandmarks\n ? f\n : (isWithFaceLandmarks(f) ? f.landmarks : undefined);\n if (!landmarks) {\n throw new Error('drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof');\n }\n\n new DrawFaceLandmarks(landmarks).draw(canvasArg);\n });\n}\n", "import { extractConvParamsFactory, extractSeparableConvParamsFactory, extractWeightsFactory } from '../common/index';\nimport { ExtractWeightsFunction, ParamMapping } from '../common/types';\nimport { range } from '../utils/index';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n function extractReductionBlockParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ReductionBlockParams {\n const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`);\n const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`);\n\n return { separable_conv0, separable_conv1, expansion_conv };\n }\n\n function extractMainBlockParams(channels: number, mappedPrefix: string): MainBlockParams {\n const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`);\n const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`);\n\n return { separable_conv0, separable_conv1, separable_conv2 };\n }\n\n return {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n };\n}\n\nexport function extractParams(weights: Float32Array, numMainBlocks: number): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const entry_flow_conv_in = extractConvParams(3, 32, 3, 'entry_flow/conv_in');\n const entry_flow_reduction_block_0 = extractReductionBlockParams(32, 64, 'entry_flow/reduction_block_0');\n const entry_flow_reduction_block_1 = extractReductionBlockParams(64, 128, 'entry_flow/reduction_block_1');\n\n const entry_flow = {\n conv_in: entry_flow_conv_in,\n reduction_block_0: entry_flow_reduction_block_0,\n reduction_block_1: entry_flow_reduction_block_1,\n };\n\n const middle_flow = {};\n range(numMainBlocks, 0, 1).forEach((idx) => {\n middle_flow[`main_block_${idx}`] = extractMainBlockParams(128, `middle_flow/main_block_${idx}`);\n });\n\n const exit_flow_reduction_block = 
extractReductionBlockParams(128, 256, 'exit_flow/reduction_block');\n const exit_flow_separable_conv = extractSeparableConvParams(256, 512, 'exit_flow/separable_conv');\n\n const exit_flow = {\n reduction_block: exit_flow_reduction_block,\n separable_conv: exit_flow_separable_conv,\n };\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { entry_flow, middle_flow, exit_flow },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common/index';\nimport { loadConvParamsFactory } from '../common/loadConvParamsFactory';\nimport { range } from '../utils/index';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n const extractConvParams = loadConvParamsFactory(extractWeightEntry);\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n\n function extractReductionBlockParams(mappedPrefix: string): ReductionBlockParams {\n const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`);\n const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`);\n\n return { separable_conv0, separable_conv1, expansion_conv };\n }\n\n function extractMainBlockParams(mappedPrefix: string): MainBlockParams {\n const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`);\n const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`);\n\n return { separable_conv0, separable_conv1, separable_conv2 };\n }\n\n return {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n numMainBlocks: number,\n): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const entry_flow_conv_in = extractConvParams('entry_flow/conv_in');\n const entry_flow_reduction_block_0 = extractReductionBlockParams('entry_flow/reduction_block_0');\n const entry_flow_reduction_block_1 = extractReductionBlockParams('entry_flow/reduction_block_1');\n\n const entry_flow = {\n conv_in: entry_flow_conv_in,\n reduction_block_0: entry_flow_reduction_block_0,\n reduction_block_1: entry_flow_reduction_block_1,\n };\n\n const middle_flow = {};\n range(numMainBlocks, 0, 1).forEach((idx) => {\n middle_flow[`main_block_${idx}`] = extractMainBlockParams(`middle_flow/main_block_${idx}`);\n });\n\n const exit_flow_reduction_block = extractReductionBlockParams('exit_flow/reduction_block');\n const exit_flow_separable_conv = extractSeparableConvParams('exit_flow/separable_conv');\n\n const exit_flow = {\n reduction_block: exit_flow_reduction_block,\n separable_conv: exit_flow_separable_conv,\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { 
params: { entry_flow, middle_flow, exit_flow }, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, depthwiseSeparableConv } from '../common/index';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { range } from '../utils/index';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction conv(x: tf.Tensor4D, params: ConvParams, stride: [number, number]): tf.Tensor4D {\n return tf.add(tf.conv2d(x, params.filters, stride, 'same'), params.bias);\n}\n\nfunction reductionBlock(x: tf.Tensor4D, params: ReductionBlockParams, isActivateInput: boolean = true): tf.Tensor4D {\n let out = isActivateInput ? tf.relu(x) : x;\n out = depthwiseSeparableConv(out, params.separable_conv0, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]);\n out = tf.maxPool(out, [3, 3], [2, 2], 'same');\n out = tf.add(out, conv(x, params.expansion_conv, [2, 2]));\n return out;\n}\n\nfunction mainBlock(x: tf.Tensor4D, params: MainBlockParams): tf.Tensor4D {\n let out = depthwiseSeparableConv(tf.relu(x), params.separable_conv0, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv2, [1, 1]);\n out = tf.add(out, x);\n return out;\n}\n\nexport class TinyXception extends NeuralNetwork {\n private _numMainBlocks: number\n\n constructor(numMainBlocks: number) {\n super('TinyXception');\n this._numMainBlocks = numMainBlocks;\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n if (!params) {\n throw new Error('TinyXception - load model before inference');\n }\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2]));\n out = reductionBlock(out, params.entry_flow.reduction_block_0, false);\n out = reductionBlock(out, params.entry_flow.reduction_block_1);\n range(this._numMainBlocks, 0, 1).forEach((idx) => {\n out = mainBlock(out, params.middle_flow[`main_block_${idx}`]);\n });\n out = reductionBlock(out, params.exit_flow.reduction_block);\n out = tf.relu(depthwiseSeparableConv(out, params.exit_flow.separable_conv, [1, 1]));\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'tiny_xception_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap, this._numMainBlocks);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights, this._numMainBlocks);\n }\n}\n", "import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const extractFCParams = 
extractFCParamsFactory(extractWeights, paramMappings);\n\n const age = extractFCParams(512, 1, 'fc/age');\n const gender = extractFCParams(512, 2, 'fc/gender');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { fc: { age, gender } },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractFcParams(prefix: string): FCParams {\n const weights = extractWeightEntry(`${prefix}/weights`, 2);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n return { weights, bias };\n }\n\n const params = {\n fc: {\n age: extractFcParams('fc/age'),\n gender: extractFcParams('fc/gender'),\n },\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FCParams } from '../common/index';\n\n// eslint-disable-next-line no-shadow\nexport enum Gender {\n // eslint-disable-next-line no-unused-vars\n FEMALE = 'female',\n // eslint-disable-next-line no-unused-vars\n MALE = 'male'\n}\n\nexport type AgeAndGenderPrediction = {\n age: number\n gender: Gender\n genderProbability: number\n}\n\nexport type NetOutput = { age: tf.Tensor1D, gender: tf.Tensor2D }\n\nexport type NetParams = {\n fc: {\n age: FCParams\n gender: FCParams\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { fullyConnectedLayer } from '../common/fullyConnectedLayer';\nimport { seperateWeightMaps } from '../faceProcessor/util';\nimport { TinyXception } from '../xception/TinyXception';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { AgeAndGenderPrediction, Gender, NetOutput, NetParams } from './types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\n\nexport class AgeGenderNet extends NeuralNetwork<NetParams> {\n private _faceFeatureExtractor: TinyXception\n\n constructor(faceFeatureExtractor: TinyXception = new TinyXception(2)) {\n super('AgeGenderNet');\n this._faceFeatureExtractor = faceFeatureExtractor;\n }\n\n public get faceFeatureExtractor(): TinyXception {\n return this._faceFeatureExtractor;\n }\n\n public runNet(input: NetInput | tf.Tensor4D): NetOutput {\n const { params } = this;\n\n if (!params) {\n throw new Error(`${this._name} - load model before inference`);\n }\n\n return tf.tidy(() => {\n const bottleneckFeatures = input instanceof NetInput\n ? 
this.faceFeatureExtractor.forwardInput(input)\n : input;\n\n const pooled = tf.avgPool(bottleneckFeatures, [7, 7], [2, 2], 'valid').as2D(bottleneckFeatures.shape[0], -1);\n const age = fullyConnectedLayer(pooled, params.fc.age).as1D();\n const gender = fullyConnectedLayer(pooled, params.fc.gender);\n return { age, gender };\n });\n }\n\n public forwardInput(input: NetInput | tf.Tensor4D): NetOutput {\n return tf.tidy(() => {\n const { age, gender } = this.runNet(input);\n return { age, gender: tf.softmax(gender) };\n });\n }\n\n public async forward(input: TNetInput): Promise<NetOutput> {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]> {\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput);\n\n const ages = tf.unstack(out.age);\n const genders = tf.unstack(out.gender);\n const ageAndGenderTensors = ages.map((ageTensor, i) => ({\n ageTensor,\n genderTensor: genders[i],\n }));\n\n const predictionsByBatch = await Promise.all(\n ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => {\n const age = (ageTensor.dataSync())[0];\n const probMale = (genderTensor.dataSync())[0];\n const isMale = probMale > 0.5;\n const gender = isMale ? Gender.MALE : Gender.FEMALE;\n const genderProbability = isMale ? probMale : (1 - probMale);\n\n ageTensor.dispose();\n genderTensor.dispose();\n return { age, gender, genderProbability };\n }),\n );\n out.age.dispose();\n out.gender.dispose();\n\n return netInput.isBatchInput ? predictionsByBatch as AgeAndGenderPrediction[] : predictionsByBatch[0] as AgeAndGenderPrediction;\n }\n\n protected getDefaultModelName(): string {\n return 'age_gender_model';\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.faceFeatureExtractor.dispose(throwOnRedispose);\n super.dispose(throwOnRedispose);\n }\n\n public loadClassifierParams(weights: Float32Array) {\n const { params, paramMappings } = this.extractClassifierParams(weights);\n this._params = params;\n this._paramMappings = paramMappings;\n }\n\n public extractClassifierParams(weights: Float32Array) {\n return extractParams(weights);\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap);\n\n this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap);\n\n return extractParamsFromWeightMap(classifierMap);\n }\n\n protected extractParams(weights: Float32Array) {\n const classifierWeightSize = (512 * 1 + 1) + (512 * 2 + 2);\n\n const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize);\n const classifierWeights = weights.slice(weights.length - classifierWeightSize);\n\n this.faceFeatureExtractor.extractWeights(featureExtractorWeights);\n return this.extractClassifierParams(classifierWeights);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { IDimensions, Point } from '../classes/index';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceProcessor } from '../faceProcessor/FaceProcessor';\nimport { isEven } from '../utils/index';\n\nexport abstract class FaceLandmark68NetBase<\n TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams\n>\n extends FaceProcessor<TExtractorParams> {\n public postProcess(output: tf.Tensor2D, inputSize: number, 
originalDimensions: IDimensions[]): tf.Tensor2D {\n const inputDimensions = originalDimensions.map(({ width, height }) => {\n const scale = inputSize / Math.max(height, width);\n return {\n width: width * scale,\n height: height * scale,\n };\n });\n\n const batchSize = inputDimensions.length;\n\n return tf.tidy(() => {\n const createInterleavedTensor = (fillX: number, fillY: number) => tf.stack([tf.fill([68], fillX, 'float32'), tf.fill([68], fillY, 'float32')], 1).as2D(1, 136).as1D();\n\n // eslint-disable-next-line no-unused-vars\n const getPadding = (batchIdx: number, cond: (w: number, h: number) => boolean): number => {\n const { width, height } = inputDimensions[batchIdx];\n return cond(width, height) ? Math.abs(width - height) / 2 : 0;\n };\n\n const getPaddingX = (batchIdx: number) => getPadding(batchIdx, (w, h) => w < h);\n const getPaddingY = (batchIdx: number) => getPadding(batchIdx, (w, h) => h < w);\n\n const landmarkTensors = output\n .mul(tf.fill([batchSize, 136], inputSize, 'float32'))\n .sub(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(\n getPaddingX(batchIdx),\n getPaddingY(batchIdx),\n ))))\n .div(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(\n inputDimensions[batchIdx].width,\n inputDimensions[batchIdx].height,\n ))));\n\n return landmarkTensors as tf.Tensor2D;\n });\n }\n\n public forwardInput(input: NetInput): tf.Tensor2D {\n return tf.tidy(() => {\n const out = this.runNet(input);\n return this.postProcess(\n out,\n input.inputSize as number,\n input.inputDimensions.map(([height, width]) => ({ height, width })),\n );\n });\n }\n\n public async forward(input: TNetInput): Promise<tf.Tensor2D> {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async detectLandmarks(input: TNetInput): Promise<FaceLandmarks68 | FaceLandmarks68[]> {\n const netInput = await toNetInput(input);\n const landmarkTensors = tf.tidy(\n () => tf.unstack(this.forwardInput(netInput)),\n );\n\n const landmarksForBatch = await Promise.all(landmarkTensors.map(\n async (landmarkTensor, batchIdx) => {\n const landmarksArray = Array.from(landmarkTensor.dataSync());\n const xCoords = landmarksArray.filter((_, i) => isEven(i));\n const yCoords = landmarksArray.filter((_, i) => !isEven(i));\n\n return new FaceLandmarks68(\n Array(68).fill(0).map((_, i) => new Point(xCoords[i] as number, yCoords[i] as number)),\n {\n height: netInput.getInputHeight(batchIdx),\n width: netInput.getInputWidth(batchIdx),\n },\n );\n },\n ));\n\n landmarkTensors.forEach((t) => t.dispose());\n\n return netInput.isBatchInput ? 
landmarksForBatch as FaceLandmarks68[] : landmarksForBatch[0] as FaceLandmarks68;\n }\n\n protected getClassifierChannelsOut(): number {\n return 136;\n }\n}\n", "import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';\nimport { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceLandmark68NetBase } from './FaceLandmark68NetBase';\n\nexport class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {\n constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {\n super('FaceLandmark68Net', faceFeatureExtractor);\n }\n\n protected getDefaultModelName(): string {\n return 'face_landmark_68_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 256;\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, ParamMapping } from '../common/index';\nimport { loadParamsFactory } from './loadParamsFactory';\nimport { TinyFaceFeatureExtractorParams } from './types';\n\nexport function extractParamsFromWeightMapTiny(\n weightMap: tf.NamedTensorMap,\n): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractDenseBlock3Params,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const params = {\n dense0: extractDenseBlock3Params('dense0', true),\n dense1: extractDenseBlock3Params('dense1'),\n dense2: extractDenseBlock3Params('dense2'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import { extractWeightsFactory, ParamMapping } from '../common/index';\nimport { extractorsFactory } from './extractorsFactory';\nimport { TinyFaceFeatureExtractorParams } from './types';\n\nexport function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractDenseBlock3Params,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const dense0 = extractDenseBlock3Params(3, 32, 'dense0', true);\n const dense1 = extractDenseBlock3Params(32, 64, 'dense1');\n const dense2 = extractDenseBlock3Params(64, 128, 'dense2');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { dense0, dense1, dense2 },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { denseBlock3 } from './denseBlock';\nimport { extractParamsFromWeightMapTiny } from './extractParamsFromWeightMapTiny';\nimport { extractParamsTiny } from './extractParamsTiny';\nimport { IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from './types';\n\nexport class TinyFaceFeatureExtractor extends NeuralNetwork<TinyFaceFeatureExtractorParams> implements IFaceFeatureExtractor<TinyFaceFeatureExtractorParams> {\n constructor() {\n super('TinyFaceFeatureExtractor');\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('TinyFaceFeatureExtractor - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, 
meanRgb).div(255) as tf.Tensor4D;\n\n let out = denseBlock3(normalized, params.dense0, true);\n out = denseBlock3(out, params.dense1);\n out = denseBlock3(out, params.dense2);\n out = tf.avgPool(out, [14, 14], [2, 2], 'valid');\n\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise<tf.Tensor4D> {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'face_feature_extractor_tiny_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMapTiny(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParamsTiny(weights);\n }\n}\n", "import { TinyFaceFeatureExtractor } from '../faceFeatureExtractor/TinyFaceFeatureExtractor';\nimport { TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceLandmark68NetBase } from './FaceLandmark68NetBase';\n\nexport class FaceLandmark68TinyNet extends FaceLandmark68NetBase<TinyFaceFeatureExtractorParams> {\n constructor(faceFeatureExtractor: TinyFaceFeatureExtractor = new TinyFaceFeatureExtractor()) {\n super('FaceLandmark68TinyNet', faceFeatureExtractor);\n }\n\n protected getDefaultModelName(): string {\n return 'face_landmark_68_tiny_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 128;\n }\n}\n", "import { FaceLandmark68Net } from './FaceLandmark68Net';\n\nexport * from './FaceLandmark68Net';\nexport * from './FaceLandmark68TinyNet';\nexport class FaceLandmarkNet extends FaceLandmark68Net {}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ScaleLayerParams } from './types';\n\nexport function scale(x: tf.Tensor4D, params: ScaleLayerParams): tf.Tensor4D {\n return tf.add(tf.mul(x, params.weights), params.biases);\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { scale } from './scaleLayer';\nimport { ConvLayerParams } from './types';\n\nfunction convLayer(\n x: tf.Tensor4D,\n params: ConvLayerParams,\n strides: [number, number],\n withRelu: boolean,\n padding: 'valid' | 'same' = 'same',\n): tf.Tensor4D {\n const { filters, bias } = params.conv;\n\n let out = tf.conv2d(x, filters, strides, padding);\n out = tf.add(out, bias);\n out = scale(out, params.scale);\n return withRelu ? 
tf.relu(out) : out;\n}\n\nexport function conv(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [1, 1], true);\n}\n\nexport function convNoRelu(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [1, 1], false);\n}\n\nexport function convDown(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [2, 2], true, 'valid');\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from '../common/index';\nimport { isFloat } from '../utils/index';\nimport { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n function extractFilterValues(numFilterValues: number, numFilters: number, filterSize: number): tf.Tensor4D {\n const weights = extractWeights(numFilterValues);\n const depth = weights.length / (numFilters * filterSize * filterSize);\n\n if (isFloat(depth)) {\n throw new Error(`depth has to be an integer: ${depth}, weights.length: ${weights.length}, numFilters: ${numFilters}, filterSize: ${filterSize}`);\n }\n\n return tf.tidy(\n () => tf.transpose(\n tf.tensor4d(weights, [numFilters, depth, filterSize, filterSize]),\n [2, 3, 1, 0],\n ),\n );\n }\n\n function extractConvParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvParams {\n const filters = extractFilterValues(numFilterValues, numFilters, filterSize);\n const bias = tf.tensor1d(extractWeights(numFilters));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return { filters, bias };\n }\n\n function extractScaleLayerParams(numWeights: number, mappedPrefix: string): ScaleLayerParams {\n const weights = tf.tensor1d(extractWeights(numWeights));\n const biases = tf.tensor1d(extractWeights(numWeights));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/weights` },\n { paramPath: `${mappedPrefix}/biases` },\n );\n\n return {\n weights,\n biases,\n };\n }\n\n function extractConvLayerParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvLayerParams {\n const conv = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`);\n const scale = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`);\n\n return { conv, scale };\n }\n\n function extractResidualLayerParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n isDown: boolean = false,\n ): ResidualLayerParams {\n const conv1 = extractConvLayerParams((isDown ? 
0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`);\n const conv2 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`);\n\n return { conv1, conv2 };\n }\n\n return {\n extractConvLayerParams,\n extractResidualLayerParams,\n };\n}\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvLayerParams,\n extractResidualLayerParams,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const conv32_down = extractConvLayerParams(4704, 32, 7, 'conv32_down');\n const conv32_1 = extractResidualLayerParams(9216, 32, 3, 'conv32_1');\n const conv32_2 = extractResidualLayerParams(9216, 32, 3, 'conv32_2');\n const conv32_3 = extractResidualLayerParams(9216, 32, 3, 'conv32_3');\n\n const conv64_down = extractResidualLayerParams(36864, 64, 3, 'conv64_down', true);\n const conv64_1 = extractResidualLayerParams(36864, 64, 3, 'conv64_1');\n const conv64_2 = extractResidualLayerParams(36864, 64, 3, 'conv64_2');\n const conv64_3 = extractResidualLayerParams(36864, 64, 3, 'conv64_3');\n\n const conv128_down = extractResidualLayerParams(147456, 128, 3, 'conv128_down', true);\n const conv128_1 = extractResidualLayerParams(147456, 128, 3, 'conv128_1');\n const conv128_2 = extractResidualLayerParams(147456, 128, 3, 'conv128_2');\n\n const conv256_down = extractResidualLayerParams(589824, 256, 3, 'conv256_down', true);\n const conv256_1 = extractResidualLayerParams(589824, 256, 3, 'conv256_1');\n const conv256_2 = extractResidualLayerParams(589824, 256, 3, 'conv256_2');\n const conv256_down_out = extractResidualLayerParams(589824, 256, 3, 'conv256_down_out');\n\n const fc = tf.tidy(\n () => tf.transpose(tf.tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0]),\n );\n paramMappings.push({ paramPath: 'fc' });\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n const params = {\n conv32_down,\n conv32_1,\n conv32_2,\n conv32_3,\n conv64_down,\n conv64_1,\n conv64_2,\n conv64_3,\n conv128_down,\n conv128_1,\n conv128_2,\n conv256_down,\n conv256_1,\n conv256_2,\n conv256_down_out,\n fc,\n };\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common/index';\nimport { isTensor2D } from '../utils/index';\nimport { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractScaleLayerParams(prefix: string): ScaleLayerParams {\n const weights = extractWeightEntry(`${prefix}/scale/weights`, 1);\n const biases = extractWeightEntry(`${prefix}/scale/biases`, 1);\n\n return { weights, biases };\n }\n\n function extractConvLayerParams(prefix: string): ConvLayerParams {\n const filters = extractWeightEntry(`${prefix}/conv/filters`, 4);\n const bias = extractWeightEntry(`${prefix}/conv/bias`, 1);\n const scale = extractScaleLayerParams(prefix);\n\n return { conv: { filters, bias }, scale };\n }\n\n function extractResidualLayerParams(prefix: string): ResidualLayerParams {\n return {\n conv1: extractConvLayerParams(`${prefix}/conv1`),\n 
conv2: extractConvLayerParams(`${prefix}/conv2`),\n };\n }\n\n return {\n extractConvLayerParams,\n extractResidualLayerParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvLayerParams,\n extractResidualLayerParams,\n } = extractorsFactory(weightMap, paramMappings);\n\n const conv32_down = extractConvLayerParams('conv32_down');\n const conv32_1 = extractResidualLayerParams('conv32_1');\n const conv32_2 = extractResidualLayerParams('conv32_2');\n const conv32_3 = extractResidualLayerParams('conv32_3');\n\n const conv64_down = extractResidualLayerParams('conv64_down');\n const conv64_1 = extractResidualLayerParams('conv64_1');\n const conv64_2 = extractResidualLayerParams('conv64_2');\n const conv64_3 = extractResidualLayerParams('conv64_3');\n\n const conv128_down = extractResidualLayerParams('conv128_down');\n const conv128_1 = extractResidualLayerParams('conv128_1');\n const conv128_2 = extractResidualLayerParams('conv128_2');\n\n const conv256_down = extractResidualLayerParams('conv256_down');\n const conv256_1 = extractResidualLayerParams('conv256_1');\n const conv256_2 = extractResidualLayerParams('conv256_2');\n const conv256_down_out = extractResidualLayerParams('conv256_down_out');\n\n const { fc } = weightMap;\n paramMappings.push({ originalPath: 'fc', paramPath: 'fc' });\n\n if (!isTensor2D(fc)) {\n throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${fc}`);\n }\n\n const params = {\n conv32_down,\n conv32_1,\n conv32_2,\n conv32_3,\n conv64_down,\n conv64_1,\n conv64_2,\n conv64_3,\n conv128_down,\n conv128_1,\n conv128_2,\n conv256_down,\n conv256_1,\n conv256_2,\n conv256_down_out,\n fc,\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { conv, convDown, convNoRelu } from './convLayer';\nimport { ResidualLayerParams } from './types';\n\nexport function residual(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {\n let out = conv(x, params.conv1);\n out = convNoRelu(out, params.conv2);\n out = tf.add(out, x);\n out = tf.relu(out);\n return out;\n}\n\nexport function residualDown(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {\n let out = convDown(x, params.conv1);\n out = convNoRelu(out, params.conv2);\n\n let pooled = tf.avgPool(x, 2, 2, 'valid') as tf.Tensor4D;\n const zeros = tf.zeros(pooled.shape);\n const isPad = pooled.shape[3] !== out.shape[3];\n const isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2];\n\n if (isAdjustShape) {\n const padShapeX = [...out.shape] as [number, number, number, number];\n padShapeX[1] = 1;\n const zerosW = tf.zeros(padShapeX);\n out = tf.concat([out, zerosW], 1);\n\n const padShapeY = [...out.shape] as [number, number, number, number];\n padShapeY[2] = 1;\n const zerosH = tf.zeros(padShapeY);\n out = tf.concat([out, zerosH], 2);\n }\n\n pooled = isPad ? 
tf.concat([pooled, zeros], 3) : pooled;\n out = tf.add(pooled, out) as tf.Tensor4D;\n\n out = tf.relu(out);\n return out;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { convDown } from './convLayer';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { residual, residualDown } from './residualLayer';\nimport { NetParams } from './types';\n\nexport class FaceRecognitionNet extends NeuralNetwork<NetParams> {\n constructor() {\n super('FaceRecognitionNet');\n }\n\n public forwardInput(input: NetInput): tf.Tensor2D {\n const { params } = this;\n\n if (!params) {\n throw new Error('FaceRecognitionNet - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(150, true), 'float32');\n\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n\n let out = convDown(normalized, params.conv32_down);\n out = tf.maxPool(out, 3, 2, 'valid');\n\n out = residual(out, params.conv32_1);\n out = residual(out, params.conv32_2);\n out = residual(out, params.conv32_3);\n\n out = residualDown(out, params.conv64_down);\n out = residual(out, params.conv64_1);\n out = residual(out, params.conv64_2);\n out = residual(out, params.conv64_3);\n\n out = residualDown(out, params.conv128_down);\n out = residual(out, params.conv128_1);\n out = residual(out, params.conv128_2);\n\n out = residualDown(out, params.conv256_down);\n out = residual(out, params.conv256_1);\n out = residual(out, params.conv256_2);\n out = residualDown(out, params.conv256_down_out);\n\n const globalAvg = out.mean([1, 2]) as tf.Tensor2D;\n const fullyConnected = tf.matMul(globalAvg, params.fc);\n\n return fullyConnected;\n });\n }\n\n public async forward(input: TNetInput): Promise<tf.Tensor2D> {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async computeFaceDescriptor(input: TNetInput): Promise<Float32Array | Float32Array[]> {\n if (input?.shape?.some((dim) => dim <= 0)) return new Float32Array(128);\n const netInput = await toNetInput(input);\n const faceDescriptorTensors = tf.tidy(() => tf.unstack(this.forwardInput(netInput)));\n const faceDescriptorsForBatch = await Promise.all(faceDescriptorTensors.map((t) => t.data())) as Float32Array[];\n faceDescriptorTensors.forEach((t) => t.dispose());\n return netInput.isBatchInput ? 
faceDescriptorsForBatch : faceDescriptorsForBatch[0];\n }\n\n protected getDefaultModelName(): string {\n return 'face_recognition_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import { FaceRecognitionNet } from './FaceRecognitionNet';\n\nexport * from './FaceRecognitionNet';\n\nexport function createFaceRecognitionNet(weights: Float32Array) {\n const net = new FaceRecognitionNet();\n net.extractWeights(weights);\n return net;\n}\n", "export type WithFaceDescriptor<TSource> = TSource & {\n descriptor: Float32Array\n}\n\nexport function extendWithFaceDescriptor<\n TSource\n>(\n sourceObj: TSource,\n descriptor: Float32Array,\n): WithFaceDescriptor<TSource> {\n const extension = { descriptor };\n return { ...sourceObj, ...extension };\n}\n", "export type WithAge<TSource> = TSource & {\n age: number\n}\n\nexport function isWithAge(obj: any): obj is WithAge<{}> {\n return typeof obj.age === 'number';\n}\n\nexport function extendWithAge<\n TSource\n>(\n sourceObj: TSource,\n age: number,\n): WithAge<TSource> {\n const extension = { age };\n return { ...sourceObj, ...extension };\n}\n", "import { Gender } from '../ageGenderNet/types';\nimport { isValidProbablitiy } from '../utils/index';\n\nexport type WithGender<TSource> = TSource & {\n gender: Gender\n genderProbability: number\n}\n\nexport function isWithGender(obj: any): obj is WithGender<{}> {\n return (obj.gender === Gender.MALE || obj.gender === Gender.FEMALE)\n && isValidProbablitiy(obj.genderProbability);\n}\n\nexport function extendWithGender<\n TSource\n>(\n sourceObj: TSource,\n gender: Gender,\n genderProbability: number,\n): WithGender<TSource> {\n const extension = { gender, genderProbability };\n return { ...sourceObj, ...extension };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, ParamMapping, ConvParams, extractWeightsFactory } from '../common/index';\nimport { MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n function extractDepthwiseConvParams(numChannels: number, mappedPrefix: string): MobileNetV1.DepthwiseConvParams {\n const filters = tf.tensor4d(extractWeights(3 * 3 * numChannels), [3, 3, numChannels, 1]);\n const batch_norm_scale = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_offset = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_mean = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_variance = tf.tensor1d(extractWeights(numChannels));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/batch_norm_scale` },\n { paramPath: `${mappedPrefix}/batch_norm_offset` },\n { paramPath: `${mappedPrefix}/batch_norm_mean` },\n { paramPath: `${mappedPrefix}/batch_norm_variance` },\n );\n\n return {\n filters,\n batch_norm_scale,\n batch_norm_offset,\n batch_norm_mean,\n batch_norm_variance,\n };\n }\n\n function extractConvParams(\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n isPointwiseConv?: boolean,\n ): ConvParams {\n const filters = tf.tensor4d(\n extractWeights(channelsIn * channelsOut * filterSize * filterSize),\n [filterSize, filterSize, channelsIn, channelsOut],\n );\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: 
`${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/${isPointwiseConv ? 'batch_norm_offset' : 'bias'}` },\n );\n\n return { filters, bias };\n }\n\n function extractPointwiseConvParams(\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n ): PointwiseConvParams {\n const {\n filters,\n bias,\n } = extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, true);\n\n return {\n filters,\n batch_norm_offset: bias,\n };\n }\n\n function extractConvPairParams(\n channelsIn: number,\n channelsOut: number,\n mappedPrefix: string,\n ): MobileNetV1.ConvPairParams {\n const depthwise_conv = extractDepthwiseConvParams(channelsIn, `${mappedPrefix}/depthwise_conv`);\n const pointwise_conv = extractPointwiseConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/pointwise_conv`);\n\n return { depthwise_conv, pointwise_conv };\n }\n\n function extractMobilenetV1Params(): MobileNetV1.Params {\n const conv_0 = extractPointwiseConvParams(3, 32, 3, 'mobilenetv1/conv_0');\n const conv_1 = extractConvPairParams(32, 64, 'mobilenetv1/conv_1');\n const conv_2 = extractConvPairParams(64, 128, 'mobilenetv1/conv_2');\n const conv_3 = extractConvPairParams(128, 128, 'mobilenetv1/conv_3');\n const conv_4 = extractConvPairParams(128, 256, 'mobilenetv1/conv_4');\n const conv_5 = extractConvPairParams(256, 256, 'mobilenetv1/conv_5');\n const conv_6 = extractConvPairParams(256, 512, 'mobilenetv1/conv_6');\n const conv_7 = extractConvPairParams(512, 512, 'mobilenetv1/conv_7');\n const conv_8 = extractConvPairParams(512, 512, 'mobilenetv1/conv_8');\n const conv_9 = extractConvPairParams(512, 512, 'mobilenetv1/conv_9');\n const conv_10 = extractConvPairParams(512, 512, 'mobilenetv1/conv_10');\n const conv_11 = extractConvPairParams(512, 512, 'mobilenetv1/conv_11');\n const conv_12 = extractConvPairParams(512, 1024, 'mobilenetv1/conv_12');\n const conv_13 = extractConvPairParams(1024, 1024, 'mobilenetv1/conv_13');\n return {\n conv_0,\n conv_1,\n conv_2,\n conv_3,\n conv_4,\n conv_5,\n conv_6,\n conv_7,\n conv_8,\n conv_9,\n conv_10,\n conv_11,\n conv_12,\n conv_13,\n };\n }\n\n function extractPredictionLayerParams(): PredictionLayerParams {\n const conv_0 = extractPointwiseConvParams(1024, 256, 1, 'prediction_layer/conv_0');\n const conv_1 = extractPointwiseConvParams(256, 512, 3, 'prediction_layer/conv_1');\n const conv_2 = extractPointwiseConvParams(512, 128, 1, 'prediction_layer/conv_2');\n const conv_3 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_3');\n const conv_4 = extractPointwiseConvParams(256, 128, 1, 'prediction_layer/conv_4');\n const conv_5 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_5');\n const conv_6 = extractPointwiseConvParams(256, 64, 1, 'prediction_layer/conv_6');\n const conv_7 = extractPointwiseConvParams(64, 128, 3, 'prediction_layer/conv_7');\n const box_encoding_0_predictor = extractConvParams(512, 12, 1, 'prediction_layer/box_predictor_0/box_encoding_predictor');\n const class_predictor_0 = extractConvParams(512, 9, 1, 'prediction_layer/box_predictor_0/class_predictor');\n const box_encoding_1_predictor = extractConvParams(1024, 24, 1, 'prediction_layer/box_predictor_1/box_encoding_predictor');\n const class_predictor_1 = extractConvParams(1024, 18, 1, 'prediction_layer/box_predictor_1/class_predictor');\n const box_encoding_2_predictor = extractConvParams(512, 24, 1, 'prediction_layer/box_predictor_2/box_encoding_predictor');\n const class_predictor_2 = extractConvParams(512, 18, 1, 
'prediction_layer/box_predictor_2/class_predictor');\n const box_encoding_3_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_3/box_encoding_predictor');\n const class_predictor_3 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_3/class_predictor');\n const box_encoding_4_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_4/box_encoding_predictor');\n const class_predictor_4 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_4/class_predictor');\n const box_encoding_5_predictor = extractConvParams(128, 24, 1, 'prediction_layer/box_predictor_5/box_encoding_predictor');\n const class_predictor_5 = extractConvParams(128, 18, 1, 'prediction_layer/box_predictor_5/class_predictor');\n\n const box_predictor_0 = {\n box_encoding_predictor: box_encoding_0_predictor,\n class_predictor: class_predictor_0,\n };\n const box_predictor_1 = {\n box_encoding_predictor: box_encoding_1_predictor,\n class_predictor: class_predictor_1,\n };\n const box_predictor_2 = {\n box_encoding_predictor: box_encoding_2_predictor,\n class_predictor: class_predictor_2,\n };\n const box_predictor_3 = {\n box_encoding_predictor: box_encoding_3_predictor,\n class_predictor: class_predictor_3,\n };\n const box_predictor_4 = {\n box_encoding_predictor: box_encoding_4_predictor,\n class_predictor: class_predictor_4,\n };\n const box_predictor_5 = {\n box_encoding_predictor: box_encoding_5_predictor,\n class_predictor: class_predictor_5,\n };\n return {\n conv_0,\n conv_1,\n conv_2,\n conv_3,\n conv_4,\n conv_5,\n conv_6,\n conv_7,\n box_predictor_0,\n box_predictor_1,\n box_predictor_2,\n box_predictor_3,\n box_predictor_4,\n box_predictor_5,\n };\n }\n\n return {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n };\n}\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n const {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n } = extractorsFactory(extractWeights, paramMappings);\n const mobilenetv1 = extractMobilenetV1Params();\n const prediction_layer = extractPredictionLayerParams();\n const extra_dim = tf.tensor3d(\n extractWeights(5118 * 4),\n [1, 5118, 4],\n );\n const output_layer = {\n extra_dim,\n };\n paramMappings.push({ paramPath: 'output_layer/extra_dim' });\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n params: {\n mobilenetv1,\n prediction_layer,\n output_layer,\n },\n paramMappings,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common/index';\nimport { isTensor3D } from '../utils/index';\nimport { BoxPredictionParams, MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractPointwiseConvParams(prefix: string, idx: number, mappedPrefix: string): PointwiseConvParams {\n const filters = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/weights`, 4, `${mappedPrefix}/filters`);\n const batch_norm_offset = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/convolution_bn_offset`, 1, 
`${mappedPrefix}/batch_norm_offset`);\n return { filters, batch_norm_offset };\n }\n\n function extractConvPairParams(idx: number): MobileNetV1.ConvPairParams {\n const mappedPrefix = `mobilenetv1/conv_${idx}`;\n const prefixDepthwiseConv = `MobilenetV1/Conv2d_${idx}_depthwise`;\n const mappedPrefixDepthwiseConv = `${mappedPrefix}/depthwise_conv`;\n const mappedPrefixPointwiseConv = `${mappedPrefix}/pointwise_conv`;\n\n const filters = extractWeightEntry(`${prefixDepthwiseConv}/depthwise_weights`, 4, `${mappedPrefixDepthwiseConv}/filters`);\n const batch_norm_scale = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/gamma`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_scale`);\n const batch_norm_offset = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/beta`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_offset`);\n const batch_norm_mean = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_mean`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_mean`);\n const batch_norm_variance = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_variance`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_variance`);\n\n return {\n depthwise_conv: {\n filters,\n batch_norm_scale,\n batch_norm_offset,\n batch_norm_mean,\n batch_norm_variance,\n },\n pointwise_conv: extractPointwiseConvParams('MobilenetV1', idx, mappedPrefixPointwiseConv),\n };\n }\n\n function extractMobilenetV1Params(): MobileNetV1.Params {\n return {\n conv_0: extractPointwiseConvParams('MobilenetV1', 0, 'mobilenetv1/conv_0'),\n conv_1: extractConvPairParams(1),\n conv_2: extractConvPairParams(2),\n conv_3: extractConvPairParams(3),\n conv_4: extractConvPairParams(4),\n conv_5: extractConvPairParams(5),\n conv_6: extractConvPairParams(6),\n conv_7: extractConvPairParams(7),\n conv_8: extractConvPairParams(8),\n conv_9: extractConvPairParams(9),\n conv_10: extractConvPairParams(10),\n conv_11: extractConvPairParams(11),\n conv_12: extractConvPairParams(12),\n conv_13: extractConvPairParams(13),\n };\n }\n\n function extractConvParams(prefix: string, mappedPrefix: string): ConvParams {\n const filters = extractWeightEntry(`${prefix}/weights`, 4, `${mappedPrefix}/filters`);\n const bias = extractWeightEntry(`${prefix}/biases`, 1, `${mappedPrefix}/bias`);\n return { filters, bias };\n }\n\n function extractBoxPredictorParams(idx: number): BoxPredictionParams {\n const box_encoding_predictor = extractConvParams(\n `Prediction/BoxPredictor_${idx}/BoxEncodingPredictor`,\n `prediction_layer/box_predictor_${idx}/box_encoding_predictor`,\n );\n const class_predictor = extractConvParams(\n `Prediction/BoxPredictor_${idx}/ClassPredictor`,\n `prediction_layer/box_predictor_${idx}/class_predictor`,\n );\n return { box_encoding_predictor, class_predictor };\n }\n\n function extractPredictionLayerParams(): PredictionLayerParams {\n return {\n conv_0: extractPointwiseConvParams('Prediction', 0, 'prediction_layer/conv_0'),\n conv_1: extractPointwiseConvParams('Prediction', 1, 'prediction_layer/conv_1'),\n conv_2: extractPointwiseConvParams('Prediction', 2, 'prediction_layer/conv_2'),\n conv_3: extractPointwiseConvParams('Prediction', 3, 'prediction_layer/conv_3'),\n conv_4: extractPointwiseConvParams('Prediction', 4, 'prediction_layer/conv_4'),\n conv_5: extractPointwiseConvParams('Prediction', 5, 'prediction_layer/conv_5'),\n conv_6: extractPointwiseConvParams('Prediction', 6, 'prediction_layer/conv_6'),\n conv_7: extractPointwiseConvParams('Prediction', 7, 'prediction_layer/conv_7'),\n box_predictor_0: extractBoxPredictorParams(0),\n 
box_predictor_1: extractBoxPredictorParams(1),\n box_predictor_2: extractBoxPredictorParams(2),\n box_predictor_3: extractBoxPredictorParams(3),\n box_predictor_4: extractBoxPredictorParams(4),\n box_predictor_5: extractBoxPredictorParams(5),\n };\n }\n\n return {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n const {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n } = extractorsFactory(weightMap, paramMappings);\n const extra_dim = weightMap['Output/extra_dim'];\n paramMappings.push({ originalPath: 'Output/extra_dim', paramPath: 'output_layer/extra_dim' });\n if (!isTensor3D(extra_dim)) {\n throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${extra_dim}`);\n }\n\n const params = {\n mobilenetv1: extractMobilenetV1Params(),\n prediction_layer: extractPredictionLayerParams(),\n output_layer: {\n extra_dim,\n },\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { PointwiseConvParams } from './types';\n\nexport function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams, strides: [number, number]) {\n return tf.tidy(() => {\n let out = tf.conv2d(x, params.filters, strides, 'same');\n out = tf.add(out, params.batch_norm_offset);\n return tf.clipByValue(out, 0, 6);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { pointwiseConvLayer } from './pointwiseConvLayer';\nimport { MobileNetV1 } from './types';\n\nconst epsilon = 0.0010000000474974513;\n\nfunction depthwiseConvLayer(x: tf.Tensor4D, params: MobileNetV1.DepthwiseConvParams, strides: [number, number]) {\n return tf.tidy(() => {\n let out = tf.depthwiseConv2d(x, params.filters, strides, 'same');\n out = tf.batchNorm(\n out,\n params.batch_norm_mean,\n params.batch_norm_variance,\n params.batch_norm_offset,\n params.batch_norm_scale,\n epsilon,\n );\n return tf.clipByValue(out, 0, 6);\n });\n}\n\nfunction getStridesForLayerIdx(layerIdx: number): [number, number] {\n return [2, 4, 6, 12].some((idx) => idx === layerIdx) ? 
[2, 2] : [1, 1];\n}\n\nexport function mobileNetV1(x: tf.Tensor4D, params: MobileNetV1.Params) {\n return tf.tidy(() => {\n let conv11;\n let out = pointwiseConvLayer(x, params.conv_0, [2, 2]);\n\n const convPairParams = [\n params.conv_1,\n params.conv_2,\n params.conv_3,\n params.conv_4,\n params.conv_5,\n params.conv_6,\n params.conv_7,\n params.conv_8,\n params.conv_9,\n params.conv_10,\n params.conv_11,\n params.conv_12,\n params.conv_13,\n ];\n\n convPairParams.forEach((param, i) => {\n const layerIdx = i + 1;\n const depthwiseConvStrides = getStridesForLayerIdx(layerIdx);\n out = depthwiseConvLayer(out, param.depthwise_conv, depthwiseConvStrides);\n out = pointwiseConvLayer(out, param.pointwise_conv, [1, 1]);\n if (layerIdx === 11) conv11 = out;\n });\n\n if (!conv11) {\n throw new Error('mobileNetV1 - output of conv layer 11 is null');\n }\n\n return {\n out,\n conv11: conv11 as any,\n };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nfunction IOU(boxes: tf.Tensor2D, i: number, j: number) {\n const boxesData = boxes.arraySync();\n const yminI = Math.min(boxesData[i][0], boxesData[i][2]);\n const xminI = Math.min(boxesData[i][1], boxesData[i][3]);\n const ymaxI = Math.max(boxesData[i][0], boxesData[i][2]);\n const xmaxI = Math.max(boxesData[i][1], boxesData[i][3]);\n const yminJ = Math.min(boxesData[j][0], boxesData[j][2]);\n const xminJ = Math.min(boxesData[j][1], boxesData[j][3]);\n const ymaxJ = Math.max(boxesData[j][0], boxesData[j][2]);\n const xmaxJ = Math.max(boxesData[j][1], boxesData[j][3]);\n const areaI = (ymaxI - yminI) * (xmaxI - xminI);\n const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ);\n if (areaI <= 0 || areaJ <= 0) return 0.0;\n const intersectionYmin = Math.max(yminI, yminJ);\n const intersectionXmin = Math.max(xminI, xminJ);\n const intersectionYmax = Math.min(ymaxI, ymaxJ);\n const intersectionXmax = Math.min(xmaxI, xmaxJ);\n const intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0.0) * Math.max(intersectionXmax - intersectionXmin, 0.0);\n return intersectionArea / (areaI + areaJ - intersectionArea);\n}\n\nexport function nonMaxSuppression(\n boxes: tf.Tensor2D,\n scores: number[],\n maxOutputSize: number,\n iouThreshold: number,\n scoreThreshold: number,\n): number[] {\n const numBoxes = boxes.shape[0];\n const outputSize = Math.min(maxOutputSize, numBoxes);\n\n const candidates = scores\n .map((score, boxIndex) => ({ score, boxIndex }))\n .filter((c) => c.score > scoreThreshold)\n .sort((c1, c2) => c2.score - c1.score);\n\n const suppressFunc = (x: number) => (x <= iouThreshold ? 
1 : 0);\n const selected: number[] = [];\n\n candidates.forEach((c) => {\n if (selected.length >= outputSize) return;\n const originalScore = c.score;\n for (let j = selected.length - 1; j >= 0; --j) {\n const iou = IOU(boxes, c.boxIndex, selected[j]);\n if (iou === 0.0) continue;\n c.score *= suppressFunc(iou);\n if (c.score <= scoreThreshold) break;\n }\n if (originalScore === c.score) {\n selected.push(c.boxIndex);\n }\n });\n return selected;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { OutputLayerParams } from './types';\n\nfunction getCenterCoordinatesAndSizesLayer(x: tf.Tensor2D) {\n const vec = tf.unstack(tf.transpose(x, [1, 0]));\n\n const sizes = [\n tf.sub(vec[2], vec[0]),\n tf.sub(vec[3], vec[1]),\n ];\n const centers = [\n tf.add(vec[0], tf.div(sizes[0], 2)),\n tf.add(vec[1], tf.div(sizes[1], 2)),\n ];\n return { sizes, centers };\n}\n\nfunction decodeBoxesLayer(x0: tf.Tensor2D, x1: tf.Tensor2D) {\n const { sizes, centers } = getCenterCoordinatesAndSizesLayer(x0);\n\n const vec = tf.unstack(tf.transpose(x1, [1, 0]));\n const div0_out = tf.div(tf.mul(tf.exp(tf.div(vec[2], 5)), sizes[0]), 2);\n const add0_out = tf.add(tf.mul(tf.div(vec[0], 10), sizes[0]), centers[0]);\n const div1_out = tf.div(tf.mul(tf.exp(tf.div(vec[3], 5)), sizes[1]), 2);\n const add1_out = tf.add(tf.mul(tf.div(vec[1], 10), sizes[1]), centers[1]);\n\n return tf.transpose(\n tf.stack([\n tf.sub(add0_out, div0_out),\n tf.sub(add1_out, div1_out),\n tf.add(add0_out, div0_out),\n tf.add(add1_out, div1_out),\n ]),\n [1, 0],\n );\n}\n\nexport function outputLayer(boxPredictions: tf.Tensor4D, classPredictions: tf.Tensor4D, params: OutputLayerParams) {\n return tf.tidy(() => {\n const batchSize = boxPredictions.shape[0];\n\n let boxes = decodeBoxesLayer(\n tf.reshape(tf.tile(params.extra_dim, [batchSize, 1, 1]), [-1, 4]) as tf.Tensor2D,\n tf.reshape(boxPredictions, [-1, 4]) as tf.Tensor2D,\n );\n boxes = tf.reshape(boxes, [batchSize, (boxes.shape[0] / batchSize), 4]);\n\n const scoresAndClasses = tf.sigmoid(tf.slice(classPredictions, [0, 0, 1], [-1, -1, -1]));\n let scores = tf.slice(scoresAndClasses, [0, 0, 0], [-1, -1, 1]) as tf.Tensor;\n\n scores = tf.reshape(scores, [batchSize, scores.shape[1] as number]);\n\n const boxesByBatch = tf.unstack(boxes) as tf.Tensor2D[];\n const scoresByBatch = tf.unstack(scores) as tf.Tensor1D[];\n\n return { boxes: boxesByBatch, scores: scoresByBatch };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { convLayer } from '../common/index';\nimport { BoxPredictionParams } from './types';\n\nexport function boxPredictionLayer(\n x: tf.Tensor4D,\n params: BoxPredictionParams,\n) {\n return tf.tidy(() => {\n const batchSize = x.shape[0];\n const boxPredictionEncoding = tf.reshape(\n convLayer(x, params.box_encoding_predictor),\n [batchSize, -1, 1, 4],\n );\n const classPrediction = tf.reshape(\n convLayer(x, params.class_predictor),\n [batchSize, -1, 3],\n );\n return { boxPredictionEncoding, classPrediction };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { boxPredictionLayer } from './boxPredictionLayer';\nimport { pointwiseConvLayer } from './pointwiseConvLayer';\nimport { PredictionLayerParams } from './types';\n\nexport function predictionLayer(\n x: tf.Tensor4D,\n conv11: tf.Tensor4D,\n params: PredictionLayerParams,\n) {\n return tf.tidy(() => {\n const conv0 = pointwiseConvLayer(x, params.conv_0, [1, 1]);\n const conv1 = pointwiseConvLayer(conv0, params.conv_1, [2, 2]);\n const conv2 = pointwiseConvLayer(conv1, 
params.conv_2, [1, 1]);\n const conv3 = pointwiseConvLayer(conv2, params.conv_3, [2, 2]);\n const conv4 = pointwiseConvLayer(conv3, params.conv_4, [1, 1]);\n const conv5 = pointwiseConvLayer(conv4, params.conv_5, [2, 2]);\n const conv6 = pointwiseConvLayer(conv5, params.conv_6, [1, 1]);\n const conv7 = pointwiseConvLayer(conv6, params.conv_7, [2, 2]);\n\n const boxPrediction0 = boxPredictionLayer(conv11, params.box_predictor_0);\n const boxPrediction1 = boxPredictionLayer(x, params.box_predictor_1);\n const boxPrediction2 = boxPredictionLayer(conv1, params.box_predictor_2);\n const boxPrediction3 = boxPredictionLayer(conv3, params.box_predictor_3);\n const boxPrediction4 = boxPredictionLayer(conv5, params.box_predictor_4);\n const boxPrediction5 = boxPredictionLayer(conv7, params.box_predictor_5);\n\n const boxPredictions = tf.concat([\n boxPrediction0.boxPredictionEncoding,\n boxPrediction1.boxPredictionEncoding,\n boxPrediction2.boxPredictionEncoding,\n boxPrediction3.boxPredictionEncoding,\n boxPrediction4.boxPredictionEncoding,\n boxPrediction5.boxPredictionEncoding,\n ], 1) as tf.Tensor4D;\n\n const classPredictions = tf.concat([\n boxPrediction0.classPrediction,\n boxPrediction1.classPrediction,\n boxPrediction2.classPrediction,\n boxPrediction3.classPrediction,\n boxPrediction4.classPrediction,\n boxPrediction5.classPrediction,\n ], 1) as tf.Tensor4D;\n\n return {\n boxPredictions,\n classPredictions,\n };\n });\n}\n", "export interface ISsdMobilenetv1Options {\n minConfidence?: number\n maxResults?: number\n}\n\nexport class SsdMobilenetv1Options {\n protected _name: string = 'SsdMobilenetv1Options'\n\n private _minConfidence: number\n\n private _maxResults: number\n\n constructor({ minConfidence, maxResults }: ISsdMobilenetv1Options = {}) {\n this._minConfidence = minConfidence || 0.5;\n this._maxResults = maxResults || 100;\n\n if (typeof this._minConfidence !== 'number' || this._minConfidence <= 0 || this._minConfidence >= 1) {\n throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);\n }\n\n if (typeof this._maxResults !== 'number') {\n throw new Error(`${this._name} - expected maxResults to be a number`);\n }\n }\n\n get minConfidence(): number { return this._minConfidence; }\n\n get maxResults(): number { return this._maxResults; }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Rect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { mobileNetV1 } from './mobileNetV1';\nimport { nonMaxSuppression } from './nonMaxSuppression';\nimport { outputLayer } from './outputLayer';\nimport { predictionLayer } from './predictionLayer';\nimport { ISsdMobilenetv1Options, SsdMobilenetv1Options } from './SsdMobilenetv1Options';\nimport { NetParams } from './types';\n\nexport class SsdMobilenetv1 extends NeuralNetwork<NetParams> {\n constructor() {\n super('SsdMobilenetv1');\n }\n\n public forwardInput(input: NetInput) {\n const { params } = this;\n if (!params) throw new Error('SsdMobilenetv1 - load model before inference');\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(512, false), 'float32');\n const x = tf.sub(tf.div(batchTensor, 127.5), 1) as tf.Tensor4D; // input is normalized -1..1\n const features = mobileNetV1(x, params.mobilenetv1);\n 
const { boxPredictions, classPredictions } = predictionLayer(features.out, features.conv11, params.prediction_layer);\n return outputLayer(boxPredictions, classPredictions, params.output_layer);\n });\n }\n\n public async forward(input: TNetInput) {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async locateFaces(input: TNetInput, options: ISsdMobilenetv1Options = {}): Promise<FaceDetection[]> {\n const { maxResults, minConfidence } = new SsdMobilenetv1Options(options);\n const netInput = await toNetInput(input);\n const { boxes: _boxes, scores: _scores } = this.forwardInput(netInput);\n const boxes = _boxes[0];\n const scores = _scores[0];\n for (let i = 1; i < _boxes.length; i++) {\n _boxes[i].dispose();\n _scores[i].dispose();\n }\n const scoresData = Array.from(scores.dataSync());\n const iouThreshold = 0.5;\n const indices = nonMaxSuppression(boxes, scoresData as number[], maxResults, iouThreshold, minConfidence);\n const reshapedDims = netInput.getReshapedInputDimensions(0);\n const inputSize = netInput.inputSize as number;\n const padX = inputSize / reshapedDims.width;\n const padY = inputSize / reshapedDims.height;\n const boxesData = boxes.arraySync();\n const results = indices\n .map((idx) => {\n const [top, bottom] = [\n Math.max(0, boxesData[idx][0]),\n Math.min(1.0, boxesData[idx][2]),\n ].map((val) => val * padY);\n const [left, right] = [\n Math.max(0, boxesData[idx][1]),\n Math.min(1.0, boxesData[idx][3]),\n ].map((val) => val * padX);\n return new FaceDetection(\n scoresData[idx] as number,\n new Rect(left, top, right - left, bottom - top),\n { height: netInput.getInputHeight(0), width: netInput.getInputWidth(0) },\n );\n });\n boxes.dispose();\n scores.dispose();\n return results;\n }\n\n protected getDefaultModelName(): string {\n return 'ssd_mobilenetv1_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import { SsdMobilenetv1 } from './SsdMobilenetv1';\n\nexport * from './SsdMobilenetv1';\nexport * from './SsdMobilenetv1Options';\n\nexport function createSsdMobilenetv1(weights: Float32Array) {\n const net = new SsdMobilenetv1();\n net.extractWeights(weights);\n return net;\n}\n\nexport function createFaceDetectionNet(weights: Float32Array) {\n return createSsdMobilenetv1(weights);\n}\n\n// alias for backward compatibility\nexport class FaceDetectionNet extends SsdMobilenetv1 {}\n", "import { Point } from '../classes/index';\n\nexport const IOU_THRESHOLD = 0.4;\n\nexport const BOX_ANCHORS = [\n new Point(0.738768, 0.874946),\n new Point(2.42204, 2.65704),\n new Point(4.30971, 7.04493),\n new Point(10.246, 4.59428),\n new Point(12.6868, 11.8741),\n];\n\nexport const BOX_ANCHORS_SEPARABLE = [\n new Point(1.603231, 2.094468),\n new Point(6.041143, 7.080126),\n new Point(2.882459, 3.518061),\n new Point(4.266906, 5.178857),\n new Point(9.041765, 10.66308),\n];\n\nexport const MEAN_RGB_SEPARABLE: [number, number, number] = [117.001, 114.697, 97.404];\n\nexport const DEFAULT_MODEL_NAME = 'tiny_yolov2_model';\nexport const DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model';\n", "import { Point } from '../classes/Point';\n\nexport type TinyYolov2Config = {\n withSeparableConvs: boolean\n iouThreshold: number\n anchors: Point[]\n classes: string[]\n meanRgb?: [number, number, number]\n withClassScores?: boolean,\n filterSizes?: number[]\n isFirstLayerConv2d?: 
boolean\n}\n\nconst isNumber = (arg: any) => typeof arg === 'number';\n\nexport function validateConfig(config: any) {\n if (!config) {\n throw new Error(`invalid config: ${config}`);\n }\n\n if (typeof config.withSeparableConvs !== 'boolean') {\n throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config.withSeparableConvs}`);\n }\n\n if (!isNumber(config.iouThreshold) || config.iouThreshold < 0 || config.iouThreshold > 1.0) {\n throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config.iouThreshold}`);\n }\n\n if (\n !Array.isArray(config.classes)\n || !config.classes.length\n || !config.classes.every((c: any) => typeof c === 'string')\n ) {\n throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(config.classes)}`);\n }\n\n if (\n !Array.isArray(config.anchors)\n || !config.anchors.length\n || !config.anchors.map((a: any) => a || {}).every((a: any) => isNumber(a.x) && isNumber(a.y))\n ) {\n throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config.anchors)}`);\n }\n\n if (config.meanRgb && (\n !Array.isArray(config.meanRgb)\n || config.meanRgb.length !== 3\n || !config.meanRgb.every(isNumber)\n )) {\n throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config.meanRgb)}`);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function leaky(x: tf.Tensor4D): tf.Tensor4D {\n return tf.tidy(() => {\n const min = tf.mul(x, tf.scalar(0.10000000149011612));\n return tf.add(tf.relu(tf.sub(x, min)), min);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { leaky } from './leaky';\nimport { ConvWithBatchNorm } from './types';\n\nexport function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D;\n out = tf.conv2d(out, params.conv.filters, [1, 1], 'valid');\n out = tf.sub(out, params.bn.sub);\n out = tf.mul(out, params.bn.truediv);\n out = tf.add(out, params.conv.bias);\n return leaky(out);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { SeparableConvParams } from '../common/types';\nimport { leaky } from './leaky';\n\nexport function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D;\n out = tf.separableConv2d(out, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid');\n out = tf.add(out, params.bias);\n return leaky(out);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { extractConvParamsFactory } from '../common/index';\nimport { extractSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';\nimport { extractWeightsFactory } from '../common/extractWeightsFactory';\nimport { ExtractWeightsFunction, ParamMapping } from '../common/types';\nimport { TinyYolov2Config } from './config';\nimport { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n\n function extractBatchNormParams(size: number, mappedPrefix: string): BatchNorm {\n const sub = tf.tensor1d(extractWeights(size));\n const truediv = tf.tensor1d(extractWeights(size));\n\n paramMappings.push(\n 
{ paramPath: `${mappedPrefix}/sub` },\n { paramPath: `${mappedPrefix}/truediv` },\n );\n return { sub, truediv };\n }\n\n function extractConvWithBatchNormParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ConvWithBatchNorm {\n const conv = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`);\n const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`);\n return { conv, bn };\n }\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n return {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n };\n}\n\nexport function extractParams(\n weights: Float32Array,\n config: TinyYolov2Config,\n boxEncodingSize: number,\n filterSizes: number[],\n): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const paramMappings: ParamMapping[] = [];\n const {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n } = extractorsFactory(extractWeights, paramMappings);\n let params: TinyYolov2NetParams;\n\n if (config.withSeparableConvs) {\n const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes;\n const conv0 = config.isFirstLayerConv2d\n ? extractConvParams(s0, s1, 3, 'conv0')\n : extractSeparableConvParams(s0, s1, 'conv0');\n const conv1 = extractSeparableConvParams(s1, s2, 'conv1');\n const conv2 = extractSeparableConvParams(s2, s3, 'conv2');\n const conv3 = extractSeparableConvParams(s3, s4, 'conv3');\n const conv4 = extractSeparableConvParams(s4, s5, 'conv4');\n const conv5 = extractSeparableConvParams(s5, s6, 'conv5');\n const conv6 = s7 ? extractSeparableConvParams(s6, s7, 'conv6') : undefined;\n const conv7 = s8 ? 
extractSeparableConvParams(s7, s8, 'conv7') : undefined;\n const conv8 = extractConvParams(s8 || s7 || s6, 5 * boxEncodingSize, 1, 'conv8');\n params = {\n conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\n };\n } else {\n const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes;\n const conv0 = extractConvWithBatchNormParams(s0, s1, 'conv0');\n const conv1 = extractConvWithBatchNormParams(s1, s2, 'conv1');\n const conv2 = extractConvWithBatchNormParams(s2, s3, 'conv2');\n const conv3 = extractConvWithBatchNormParams(s3, s4, 'conv3');\n const conv4 = extractConvWithBatchNormParams(s4, s5, 'conv4');\n const conv5 = extractConvWithBatchNormParams(s5, s6, 'conv5');\n const conv6 = extractConvWithBatchNormParams(s6, s7, 'conv6');\n const conv7 = extractConvWithBatchNormParams(s7, s8, 'conv7');\n const conv8 = extractConvParams(s8, 5 * boxEncodingSize, 1, 'conv8');\n params = {\n conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\n };\n }\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from '../common/index';\nimport { disposeUnusedWeightTensors } from '../common/disposeUnusedWeightTensors';\nimport { loadSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';\nimport { extractWeightEntryFactory } from '../common/extractWeightEntryFactory';\nimport { ParamMapping } from '../common/types';\nimport { TinyYolov2Config } from './config';\nimport { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractBatchNormParams(prefix: string): BatchNorm {\n const sub = extractWeightEntry(`${prefix}/sub`, 1);\n const truediv = extractWeightEntry(`${prefix}/truediv`, 1);\n return { sub, truediv };\n }\n\n function extractConvParams(prefix: string): ConvParams {\n const filters = extractWeightEntry(`${prefix}/filters`, 4);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n return { filters, bias };\n }\n\n function extractConvWithBatchNormParams(prefix: string): ConvWithBatchNorm {\n const conv = extractConvParams(`${prefix}/conv`);\n const bn = extractBatchNormParams(`${prefix}/bn`);\n return { conv, bn };\n }\n\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n return {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n config: TinyYolov2Config,\n): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n } = extractorsFactory(weightMap, paramMappings);\n\n let params: TinyYolov2NetParams;\n\n if (config.withSeparableConvs) {\n // eslint-disable-next-line no-mixed-operators\n const numFilters = (config.filterSizes && config.filterSizes.length || 9);\n params = {\n conv0: config.isFirstLayerConv2d ?
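`extractParams` consumes one flat `Float32Array` strictly in layer order, which is why the final `getRemainingWeights().length !== 0` check can catch a weight file that does not match the configured architecture. A hypothetical minimal re-implementation of the cursor contract that `extractWeightsFactory` provides (the real one lives in src/common/extractWeightsFactory.ts; this is an illustration, not a copy):

```ts
// A cursor over a flat weight buffer that hands out consecutive slices.
function weightsCursor(weights: Float32Array) {
  let pos = 0;
  const extractWeights = (count: number): Float32Array => {
    const slice = weights.subarray(pos, pos + count);
    pos += count;
    return slice;
  };
  const getRemainingWeights = (): Float32Array => weights.subarray(pos);
  return { extractWeights, getRemainingWeights };
}

const { extractWeights, getRemainingWeights } = weightsCursor(new Float32Array(10));
extractWeights(4); // first parameter block
extractWeights(6); // next block
console.log(getRemainingWeights().length); // 0 -> buffer size matched the architecture
```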
extractConvParams('conv0') : extractSeparableConvParams('conv0'),\n conv1: extractSeparableConvParams('conv1'),\n conv2: extractSeparableConvParams('conv2'),\n conv3: extractSeparableConvParams('conv3'),\n conv4: extractSeparableConvParams('conv4'),\n conv5: extractSeparableConvParams('conv5'),\n conv6: numFilters > 7 ? extractSeparableConvParams('conv6') : undefined,\n conv7: numFilters > 8 ? extractSeparableConvParams('conv7') : undefined,\n conv8: extractConvParams('conv8'),\n };\n } else {\n params = {\n conv0: extractConvWithBatchNormParams('conv0'),\n conv1: extractConvWithBatchNormParams('conv1'),\n conv2: extractConvWithBatchNormParams('conv2'),\n conv3: extractConvWithBatchNormParams('conv3'),\n conv4: extractConvWithBatchNormParams('conv4'),\n conv5: extractConvWithBatchNormParams('conv5'),\n conv6: extractConvWithBatchNormParams('conv6'),\n conv7: extractConvWithBatchNormParams('conv7'),\n conv8: extractConvParams('conv8'),\n };\n }\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n return { params, paramMappings };\n}\n", "export interface ITinyYolov2Options {\n inputSize?: number\n scoreThreshold?: number\n}\n\nexport class TinyYolov2Options {\n protected _name: string = 'TinyYolov2Options'\n\n private _inputSize: number\n\n private _scoreThreshold: number\n\n constructor({ inputSize, scoreThreshold }: ITinyYolov2Options = {}) {\n this._inputSize = inputSize || 416;\n this._scoreThreshold = scoreThreshold || 0.5;\n\n if (typeof this._inputSize !== 'number' || this._inputSize % 32 !== 0) {\n throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);\n }\n\n if (typeof this._scoreThreshold !== 'number' || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) {\n throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`);\n }\n }\n\n get inputSize(): number { return this._inputSize; }\n\n get scoreThreshold(): number { return this._scoreThreshold; }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { BoundingBox } from '../classes/BoundingBox';\nimport { Dimensions } from '../classes/Dimensions';\nimport { ObjectDetection } from '../classes/ObjectDetection';\nimport { convLayer } from '../common/index';\nimport { ConvParams, SeparableConvParams } from '../common/types';\nimport { toNetInput } from '../dom/index';\nimport { NetInput } from '../dom/NetInput';\nimport { TNetInput } from '../dom/types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { sigmoid } from '../ops/index';\nimport { nonMaxSuppression } from '../ops/nonMaxSuppression';\nimport { normalize } from '../ops/normalize';\nimport { TinyYolov2Config, validateConfig } from './config';\nimport { convWithBatchNorm } from './convWithBatchNorm';\nimport { depthwiseSeparableConv } from './depthwiseSeparableConv';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { leaky } from './leaky';\nimport { ITinyYolov2Options, TinyYolov2Options } from './TinyYolov2Options';\nimport { DefaultTinyYolov2NetParams, MobilenetParams, TinyYolov2NetParams } from './types';\n\nexport class TinyYolov2Base extends NeuralNetwork {\n public static DEFAULT_FILTER_SIZES = [3, 16, 32, 64, 128, 256, 512, 1024, 1024];\n\n private _config: TinyYolov2Config\n\n constructor(config: TinyYolov2Config) {\n super('TinyYolov2');\n validateConfig(config);\n this._config = config;\n }\n\n public get config(): TinyYolov2Config {\n return this._config;\n }\n\n public get withClassScores(): 
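`TinyYolov2Options` enforces the two invariants the detector depends on: the input is resized to a square whose side is a multiple of 32 (the five stride-2 max-pool stages in `runTinyYolov2` below give the factor of 32), and the score threshold is a proper probability. For example:

```ts
import { TinyYolov2Options } from '@vladmandic/face-api';

const ok = new TinyYolov2Options({ inputSize: 416, scoreThreshold: 0.5 });
console.log(ok.inputSize, ok.scoreThreshold); // 416 0.5

try {
  // 400 is not divisible by 32, so the constructor throws
  const bad = new TinyYolov2Options({ inputSize: 400 });
} catch (err) {
  console.log((err as Error).message);
  // "TinyYolov2Options - expected inputSize to be a number divisible by 32"
}
```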
boolean {\n return this.config.withClassScores || this.config.classes.length > 1;\n }\n\n public get boxEncodingSize(): number {\n return 5 + (this.withClassScores ? this.config.classes.length : 0);\n }\n\n public runTinyYolov2(x: tf.Tensor4D, params: DefaultTinyYolov2NetParams): tf.Tensor4D {\n let out = convWithBatchNorm(x, params.conv0);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv1);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv2);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv3);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv4);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv5);\n out = tf.maxPool(out, [2, 2], [1, 1], 'same');\n out = convWithBatchNorm(out, params.conv6);\n out = convWithBatchNorm(out, params.conv7);\n return convLayer(out, params.conv8, 'valid', false);\n }\n\n public runMobilenet(x: tf.Tensor4D, params: MobilenetParams): tf.Tensor4D {\n let out = this.config.isFirstLayerConv2d\n ? leaky(convLayer(x, params.conv0 as ConvParams, 'valid', false))\n : depthwiseSeparableConv(x, params.conv0 as SeparableConvParams);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv1);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv2);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv3);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv4);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv5);\n out = tf.maxPool(out, [2, 2], [1, 1], 'same');\n out = params.conv6 ? depthwiseSeparableConv(out, params.conv6) : out;\n out = params.conv7 ? depthwiseSeparableConv(out, params.conv7) : out;\n return convLayer(out, params.conv8, 'valid', false);\n }\n\n public forwardInput(input: NetInput, inputSize: number): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('TinyYolov2 - load model before inference');\n }\n\n return tf.tidy(() => {\n let batchTensor = tf.cast(input.toBatchTensor(inputSize, false), 'float32');\n batchTensor = this.config.meanRgb\n ? normalize(batchTensor, this.config.meanRgb)\n : batchTensor;\n batchTensor = batchTensor.div(255) as tf.Tensor4D;\n return this.config.withSeparableConvs\n ? 
this.runMobilenet(batchTensor, params as MobilenetParams)\n : this.runTinyYolov2(batchTensor, params as DefaultTinyYolov2NetParams);\n });\n }\n\n public async forward(input: TNetInput, inputSize: number): Promise {\n return this.forwardInput(await toNetInput(input), inputSize);\n }\n\n public async detect(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise {\n const { inputSize, scoreThreshold } = new TinyYolov2Options(forwardParams);\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput, inputSize);\n const out0 = tf.tidy(() => tf.unstack(out)[0].expandDims()) as tf.Tensor4D;\n const inputDimensions = {\n width: netInput.getInputWidth(0),\n height: netInput.getInputHeight(0),\n };\n\n const results = await this.extractBoxes(out0, netInput.getReshapedInputDimensions(0), scoreThreshold);\n out.dispose();\n out0.dispose();\n\n const boxes = results.map((res) => res.box);\n const scores = results.map((res) => res.score);\n const classScores = results.map((res) => res.classScore);\n const classNames = results.map((res) => this.config.classes[res.label]);\n\n const indices = nonMaxSuppression(\n boxes.map((box) => box.rescale(inputSize)),\n scores,\n this.config.iouThreshold,\n true,\n );\n\n const detections = indices.map((idx) => new ObjectDetection(\n scores[idx],\n classScores[idx],\n classNames[idx],\n boxes[idx],\n inputDimensions,\n ));\n return detections;\n }\n\n protected getDefaultModelName(): string {\n return '';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap, this.config);\n }\n\n protected extractParams(weights: Float32Array) {\n const filterSizes = this.config.filterSizes || TinyYolov2Base.DEFAULT_FILTER_SIZES;\n\n const numFilters = filterSizes ? filterSizes.length : undefined;\n if (numFilters !== 7 && numFilters !== 8 && numFilters !== 9) {\n throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${numFilters} filterSizes in config`);\n }\n return extractParams(weights, this.config, this.boxEncodingSize, filterSizes);\n }\n\n protected async extractBoxes(\n outputTensor: tf.Tensor4D,\n inputBlobDimensions: Dimensions,\n scoreThreshold?: number,\n ) {\n const { width, height } = inputBlobDimensions;\n const inputSize = Math.max(width, height);\n const correctionFactorX = inputSize / width;\n const correctionFactorY = inputSize / height;\n\n const numCells = outputTensor.shape[1];\n const numBoxes = this.config.anchors.length;\n\n const [boxesTensor, scoresTensor, classScoresTensor] = tf.tidy(() => {\n const reshaped = outputTensor.reshape([numCells, numCells, numBoxes, this.boxEncodingSize]);\n\n const boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, numBoxes, 4]);\n const scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, numBoxes, 1]);\n const classScores = this.withClassScores\n ? 
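`detect` ties the pipeline together: forward pass, per-cell box extraction, then non-max suppression against `config.iouThreshold`. A typical call from application code; `/models` is a placeholder URL for wherever the model files are hosted:

```ts
import * as faceapi from '@vladmandic/face-api';

await faceapi.loadTinyYolov2Model('/models'); // placeholder model location
const img = document.getElementById('input') as HTMLImageElement;

const detections = await faceapi.nets.tinyYolov2.detect(img, { inputSize: 416, scoreThreshold: 0.5 });
for (const det of detections) {
  console.log(det.className, det.score.toFixed(2), det.box);
}
```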
tf.softmax(reshaped.slice([0, 0, 0, 5], [numCells, numCells, numBoxes, this.config.classes.length]), 3)\n : tf.scalar(0);\n return [boxes, scores, classScores];\n });\n\n const results = [] as any;\n const scoresData = await scoresTensor.array();\n const boxesData = await boxesTensor.array();\n for (let row = 0; row < numCells; row++) {\n for (let col = 0; col < numCells; col++) {\n for (let anchor = 0; anchor < numBoxes; anchor++) {\n const score = sigmoid(scoresData[row][col][anchor][0]);\n if (!scoreThreshold || score > scoreThreshold) {\n const ctX = ((col + sigmoid(boxesData[row][col][anchor][0])) / numCells) * correctionFactorX;\n const ctY = ((row + sigmoid(boxesData[row][col][anchor][1])) / numCells) * correctionFactorY;\n const widthLocal = ((Math.exp(boxesData[row][col][anchor][2]) * this.config.anchors[anchor].x) / numCells) * correctionFactorX;\n const heightLocal = ((Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y) / numCells) * correctionFactorY;\n const x = (ctX - (widthLocal / 2));\n const y = (ctY - (heightLocal / 2));\n const pos = { row, col, anchor };\n const { classScore, label } = this.withClassScores\n ? await this.extractPredictedClass(classScoresTensor as tf.Tensor4D, pos)\n : { classScore: 1, label: 0 };\n results.push({\n box: new BoundingBox(x, y, x + widthLocal, y + heightLocal),\n score,\n classScore: score * classScore,\n label,\n ...pos,\n });\n }\n }\n }\n }\n\n boxesTensor.dispose();\n scoresTensor.dispose();\n classScoresTensor.dispose();\n return results;\n }\n\n private async extractPredictedClass(classesTensor: tf.Tensor4D, pos: { row: number, col: number, anchor: number }) {\n const { row, col, anchor } = pos;\n const classesData = await classesTensor.array();\n return Array(this.config.classes.length).fill(0)\n .map((_, i) => classesData[row][col][anchor][i])\n .map((classScore, label) => ({\n classScore,\n label,\n }))\n .reduce((max, curr) => (max.classScore > curr.classScore ? max : curr));\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection, Point } from '../classes/index';\nimport { ParamMapping } from '../common/types';\nimport { TNetInput } from '../dom/types';\nimport {\n BOX_ANCHORS,\n BOX_ANCHORS_SEPARABLE,\n DEFAULT_MODEL_NAME,\n DEFAULT_MODEL_NAME_SEPARABLE_CONV,\n IOU_THRESHOLD,\n MEAN_RGB_SEPARABLE,\n} from './const';\nimport { TinyYolov2Base } from './TinyYolov2Base';\nimport { ITinyYolov2Options } from './TinyYolov2Options';\nimport { TinyYolov2NetParams } from './types';\n\nexport class TinyYolov2 extends TinyYolov2Base {\n constructor(withSeparableConvs: boolean = true) {\n const config = {\n withSeparableConvs,\n iouThreshold: IOU_THRESHOLD,\n classes: ['face'],\n ...(withSeparableConvs\n ? {\n anchors: BOX_ANCHORS_SEPARABLE,\n meanRgb: MEAN_RGB_SEPARABLE,\n }\n : {\n anchors: BOX_ANCHORS,\n withClassScores: true,\n }),\n };\n\n super(config);\n }\n\n public get withSeparableConvs(): boolean {\n return this.config.withSeparableConvs;\n }\n\n public get anchors(): Point[] {\n return this.config.anchors;\n }\n\n public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise {\n const objectDetections = await this.detect(input, forwardParams);\n return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }));\n }\n\n protected getDefaultModelName(): string {\n return this.withSeparableConvs ? 
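The arithmetic inside `extractBoxes` is standard YOLOv2 decoding: the network predicts raw offsets (tx, ty, tw, th) per grid cell and anchor; the sigmoid keeps the box center inside its cell, and the exponential scales the anchor box. Isolated as a plain function (the aspect-ratio correction factors from above are left out for clarity):

```ts
const sigmoid = (v: number): number => 1 / (1 + Math.exp(-v));

// Decode one cell/anchor prediction into a relative box, as in extractBoxes above.
function decodeBox(
  [tx, ty, tw, th]: [number, number, number, number],
  row: number,
  col: number,
  anchor: { x: number; y: number },
  numCells: number,
) {
  const ctX = (col + sigmoid(tx)) / numCells; // center stays within column `col`
  const ctY = (row + sigmoid(ty)) / numCells;
  const width = (Math.exp(tw) * anchor.x) / numCells; // anchor scaled by exp(tw)
  const height = (Math.exp(th) * anchor.y) / numCells;
  return { x: ctX - width / 2, y: ctY - height / 2, width, height };
}

// A zero prediction sits at the cell center with exactly the anchor's size:
console.log(decodeBox([0, 0, 0, 0], 6, 6, { x: 1.6, y: 2.1 }, 13));
```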
DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME;\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n return super.extractParamsFromWeightMap(weightMap);\n }\n}\n", "import { TinyYolov2 } from './TinyYolov2';\n\nexport * from './TinyYolov2Options';\nexport * from './config';\nexport * from './types';\nexport { TinyYolov2 };\n\nexport function createTinyYolov2(weights: Float32Array, withSeparableConvs: boolean = true) {\n const net = new TinyYolov2(withSeparableConvs);\n net.extractWeights(weights);\n return net;\n}\n", "import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2/index';\n\nexport interface ITinyFaceDetectorOptions extends ITinyYolov2Options {}\n\nexport class TinyFaceDetectorOptions extends TinyYolov2Options {\n protected _name: string = 'TinyFaceDetectorOptions'\n}\n", "export class ComposableTask {\n // eslint-disable-next-line no-unused-vars\n public async then(onfulfilled: (value: T) => T | PromiseLike): Promise {\n return onfulfilled(await this.run());\n }\n\n public async run(): Promise {\n throw new Error('ComposableTask - run is not implemented');\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { extractFaces, extractFaceTensors, TNetInput } from '../dom/index';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\n\nexport async function extractAllFacesAndComputeResults, TResult>(\n parentResults: TSource[],\n input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n computeResults: (faces: Array) => Promise,\n extractedFaces?: Array | null,\n // eslint-disable-next-line no-unused-vars\n getRectForAlignment: (parentResult: WithFaceLandmarks) => FaceDetection = ({ alignedRect }) => alignedRect,\n) {\n const faceBoxes = parentResults.map((parentResult) => (isWithFaceLandmarks(parentResult)\n ? getRectForAlignment(parentResult)\n : parentResult.detection));\n const faces: Array = extractedFaces || (\n input instanceof tf.Tensor\n ? 
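`ComposableTask` works because `await` accepts any thenable: awaiting a task invokes its `then`, which calls `run()`. That is what lets the global API build chains like `detectAllFaces(...).withFaceLandmarks()` and still be awaited directly. A self-contained illustration of the pattern:

```ts
// A minimal thenable task: awaiting it implicitly calls run().
class AnswerTask {
  public async then(onfulfilled: (value: number) => number | PromiseLike<number>) {
    return onfulfilled(await this.run());
  }

  public async run(): Promise<number> {
    return 42;
  }
}

async function main() {
  const value = await new AnswerTask(); // no explicit run() call needed
  console.log(value); // 42
}
main();
```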
await extractFaceTensors(input, faceBoxes)\n : await extractFaces(input, faceBoxes)\n );\n const results = await computeResults(faces);\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return results;\n}\n\nexport async function extractSingleFaceAndComputeResult, TResult>(\n parentResult: TSource,\n input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n computeResult: (face: HTMLCanvasElement | tf.Tensor3D) => Promise,\n extractedFaces?: Array | null,\n // eslint-disable-next-line no-unused-vars\n getRectForAlignment?: (parentResultLocal: WithFaceLandmarks) => FaceDetection,\n) {\n return extractAllFacesAndComputeResults(\n [parentResult],\n input,\n async (faces) => computeResult(faces[0]),\n extractedFaces,\n getRectForAlignment,\n );\n}\n", "import { Point } from '../classes/index';\n\nexport const IOU_THRESHOLD = 0.4;\n\nexport const BOX_ANCHORS = [\n new Point(1.603231, 2.094468),\n new Point(6.041143, 7.080126),\n new Point(2.882459, 3.518061),\n new Point(4.266906, 5.178857),\n new Point(9.041765, 10.66308),\n];\n\nexport const MEAN_RGB: [number, number, number] = [117.001, 114.697, 97.404];\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection, Point } from '../classes/index';\nimport { ParamMapping } from '../common/index';\nimport { TNetInput } from '../dom/index';\nimport { ITinyYolov2Options } from '../tinyYolov2/index';\nimport { TinyYolov2Base } from '../tinyYolov2/TinyYolov2Base';\nimport { TinyYolov2NetParams } from '../tinyYolov2/types';\nimport { BOX_ANCHORS, IOU_THRESHOLD, MEAN_RGB } from './const';\n\nexport class TinyFaceDetector extends TinyYolov2Base {\n constructor() {\n const config = {\n withSeparableConvs: true,\n iouThreshold: IOU_THRESHOLD,\n classes: ['face'],\n anchors: BOX_ANCHORS,\n meanRgb: MEAN_RGB,\n isFirstLayerConv2d: true,\n filterSizes: [3, 16, 32, 64, 128, 256, 512],\n };\n\n super(config);\n }\n\n public get anchors(): Point[] {\n return this.config.anchors;\n }\n\n public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise {\n const objectDetections = await this.detect(input, forwardParams);\n return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }));\n }\n\n protected getDefaultModelName(): string {\n return 'tiny_face_detector_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n return super.extractParamsFromWeightMap(weightMap);\n }\n}\n", "import { AgeGenderNet } from '../ageGenderNet/AgeGenderNet';\nimport { AgeAndGenderPrediction } from '../ageGenderNet/types';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { TNetInput } from '../dom/index';\nimport { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';\nimport { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\nimport { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';\nimport { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';\nimport { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';\nimport { SsdMobilenetv1 } from '../ssdMobilenetv1/SsdMobilenetv1';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { TinyFaceDetector } from '../tinyFaceDetector/TinyFaceDetector';\nimport { TinyFaceDetectorOptions } from 
'../tinyFaceDetector/TinyFaceDetectorOptions';\nimport { ITinyYolov2Options, TinyYolov2 } from '../tinyYolov2/index';\n\nexport const nets = {\n ssdMobilenetv1: new SsdMobilenetv1(),\n tinyFaceDetector: new TinyFaceDetector(),\n tinyYolov2: new TinyYolov2(),\n faceLandmark68Net: new FaceLandmark68Net(),\n faceLandmark68TinyNet: new FaceLandmark68TinyNet(),\n faceRecognitionNet: new FaceRecognitionNet(),\n faceExpressionNet: new FaceExpressionNet(),\n ageGenderNet: new AgeGenderNet(),\n};\n\n/**\n * Attempts to detect all faces in an image using SSD Mobilenetv1 Network.\n *\n * @param input The input image.\n * @param options (optional, default: see SsdMobilenetv1Options constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const ssdMobilenetv1 = (input: TNetInput, options: SsdMobilenetv1Options): Promise => nets.ssdMobilenetv1.locateFaces(input, options);\n\n/**\n * Attempts to detect all faces in an image using the Tiny Face Detector.\n *\n * @param input The input image.\n * @param options (optional, default: see TinyFaceDetectorOptions constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const tinyFaceDetector = (input: TNetInput, options: TinyFaceDetectorOptions): Promise => nets.tinyFaceDetector.locateFaces(input, options);\n\n/**\n * Attempts to detect all faces in an image using the Tiny Yolov2 Network.\n *\n * @param input The input image.\n * @param options (optional, default: see TinyYolov2Options constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promise => nets.tinyYolov2.locateFaces(input, options);\n\n/**\n * Detects the 68 point face landmark positions of the face shown in an image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns 68 point face landmarks or array thereof in case of batch input.\n */\nexport const detectFaceLandmarks = (input: TNetInput): Promise => nets.faceLandmark68Net.detectLandmarks(input);\n\n/**\n * Detects the 68 point face landmark positions of the face shown in an image\n * using a tinier version of the 68 point face landmark model, which is slightly\n * faster at inference, but also slightly less accurate.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns 68 point face landmarks or array thereof in case of batch input.\n */\nexport const detectFaceLandmarksTiny = (input: TNetInput): Promise => nets.faceLandmark68TinyNet.detectLandmarks(input);\n\n/**\n * Computes a 128 entry vector (face descriptor / face embeddings) from the face shown in an image,\n * which uniquely represents the features of that person's face. The computed face descriptor can\n * be used to measure the similarity between faces, by computing the Euclidean distance of two\n * face descriptors.\n *\n * @param inputs The face image extracted from the aligned bounding box of a face. 
Can\n * also be an array of input images, which will be batch processed.\n * @returns Face descriptor with 128 entries or array thereof in case of batch input.\n */\nexport const computeFaceDescriptor = (input: TNetInput): Promise => nets.faceRecognitionNet.computeFaceDescriptor(input);\n\n/**\n * Recognizes the facial expressions from a face image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns Facial expressions with corresponding probabilities or array thereof in case of batch input.\n */\nexport const recognizeFaceExpressions = (input: TNetInput): Promise => nets.faceExpressionNet.predictExpressions(input);\n\n/**\n * Predicts age and gender from a face image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns Predictions with age, gender and gender probability or array thereof in case of batch input.\n */\nexport const predictAgeAndGender = (input: TNetInput): Promise => nets.ageGenderNet.predictAgeAndGender(input);\n\nexport const loadSsdMobilenetv1Model = (url: string) => nets.ssdMobilenetv1.load(url);\nexport const loadTinyFaceDetectorModel = (url: string) => nets.tinyFaceDetector.load(url);\nexport const loadTinyYolov2Model = (url: string) => nets.tinyYolov2.load(url);\nexport const loadFaceLandmarkModel = (url: string) => nets.faceLandmark68Net.load(url);\nexport const loadFaceLandmarkTinyModel = (url: string) => nets.faceLandmark68TinyNet.load(url);\nexport const loadFaceRecognitionModel = (url: string) => nets.faceRecognitionNet.load(url);\nexport const loadFaceExpressionModel = (url: string) => nets.faceExpressionNet.load(url);\nexport const loadAgeGenderModel = (url: string) => nets.ageGenderNet.load(url);\n\n// backward compatibility\nexport const loadFaceDetectionModel = loadSsdMobilenetv1Model;\nexport const locateFaces = ssdMobilenetv1;\nexport const detectLandmarks = detectFaceLandmarks;\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { TNetInput } from '../dom/index';\nimport { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderTask, PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\n\nexport class PredictFaceExpressionsTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected extractedFaces?: Array,\n ) {\n super();\n }\n}\n\nexport class PredictAllFaceExpressionsTask> extends PredictFaceExpressionsTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults 
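Putting `computeFaceDescriptor` and `euclideanDistance` together: two descriptors of the same person should land below the 0.6 default threshold that `FaceMatcher` (further below) uses. A sketch, with `img1`/`img2` standing in for any two aligned face crops and `/models` as a placeholder model URL:

```ts
import * as faceapi from '@vladmandic/face-api';

declare const img1: HTMLImageElement; // any aligned face crop
declare const img2: HTMLImageElement;

await faceapi.loadFaceRecognitionModel('/models'); // placeholder model location

const d1 = await faceapi.computeFaceDescriptor(img1) as Float32Array;
const d2 = await faceapi.computeFaceDescriptor(img2) as Float32Array;

const distance = faceapi.euclideanDistance(d1, d2);
console.log(distance < 0.6 ? 'likely the same person' : 'likely different people');
```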
= await this.parentTask;\n\n const faceExpressionsByFace = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n async (faces) => Promise.all(\n faces.map((face) => nets.faceExpressionNet.predictExpressions(face) as Promise),\n ),\n this.extractedFaces,\n );\n\n return parentResults.map(\n (parentResult, i) => extendWithFaceExpressions(parentResult, faceExpressionsByFace[i]),\n );\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderTask(this, this.input);\n }\n}\n\nexport class PredictSingleFaceExpressionsTask> extends PredictFaceExpressionsTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n\n const faceExpressions = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.faceExpressionNet.predictExpressions(face) as Promise,\n this.extractedFaces,\n );\n\n return extendWithFaceExpressions(parentResult, faceExpressions);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderTask(this, this.input);\n }\n}\n\nexport class PredictAllFaceExpressionsWithFaceAlignmentTask>> extends PredictAllFaceExpressionsTask {\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class PredictSingleFaceExpressionsWithFaceAlignmentTask>> extends PredictSingleFaceExpressionsTask {\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { AgeAndGenderPrediction } from '../ageGenderNet/types';\nimport { TNetInput } from '../dom/index';\nimport { extendWithAge, WithAge } from '../factories/WithAge';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { extendWithGender, WithGender } from '../factories/WithGender';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllFaceExpressionsTask, PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class PredictAgeAndGenderTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected extractedFaces?: Array,\n ) {\n super();\n }\n}\n\nexport class PredictAllAgeAndGenderTask> extends PredictAgeAndGenderTaskBase>[], TSource[]> {\n public async run(): Promise>[]> {\n const parentResults = await this.parentTask;\n const ageAndGenderByFace = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n async (faces) => Promise.all(faces.map((face) => nets.ageGenderNet.predictAgeAndGender(face) as Promise)),\n this.extractedFaces,\n );\n return 
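From the consumer side, the expression tasks surface as one chain step. A sketch for a single face, assuming `faceapi` is imported as above and `input` is any supported media element (the result is `undefined` when no face is found, hence the guard):

```ts
declare const input: HTMLImageElement;

const result = await faceapi
  .detectSingleFace(input)
  .withFaceLandmarks()
  .withFaceExpressions();

if (result) {
  // FaceExpressions maps each expression to a probability
  console.log(result.expressions); // e.g. { neutral: 0.01, happy: 0.98, ... }
}
```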
parentResults.map((parentResult, i) => {\n const { age, gender, genderProbability } = ageAndGenderByFace[i];\n return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age);\n });\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsTask(this, this.input);\n }\n}\n\nexport class PredictSingleAgeAndGenderTask> extends PredictAgeAndGenderTaskBase> | undefined, TSource | undefined> {\n public async run(): Promise> | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) return undefined;\n const { age, gender, genderProbability } = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.ageGenderNet.predictAgeAndGender(face) as Promise,\n this.extractedFaces,\n );\n return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsTask(this, this.input);\n }\n}\n\nexport class PredictAllAgeAndGenderWithFaceAlignmentTask>> extends PredictAllAgeAndGenderTask {\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class PredictSingleAgeAndGenderWithFaceAlignmentTask>> extends PredictSingleAgeAndGenderTask {\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { TNetInput } from '../dom/index';\nimport { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class ComputeFaceDescriptorsTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n ) {\n super();\n }\n}\n\nexport class ComputeAllFaceDescriptorsTask>> extends ComputeFaceDescriptorsTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults = await this.parentTask;\n const descriptors = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise)),\n null,\n (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true }),\n );\n return descriptors.map((descriptor, i) => extendWithFaceDescriptor(parentResults[i], descriptor));\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n}\n\nexport class ComputeSingleFaceDescriptorTask>> extends 
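The age/gender tasks compose the same way; `extendWithAge`/`extendWithGender` attach `age`, `gender`, and `genderProbability` to the parent result. A sketch under the same assumptions as the previous example:

```ts
declare const input: HTMLImageElement;

const result = await faceapi
  .detectSingleFace(input)
  .withFaceLandmarks()
  .withAgeAndGender();

if (result) {
  const { age, gender, genderProbability } = result;
  console.log(`${gender} (${(genderProbability * 100).toFixed(0)}%), age ~${age.toFixed(0)}`);
}
```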
ComputeFaceDescriptorsTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n const descriptor = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise,\n null,\n // eslint-disable-next-line no-shadow\n (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true }),\n );\n\n return extendWithFaceDescriptor(parentResult, descriptor);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { extractFaces, extractFaceTensors, TNetInput } from '../dom/index';\nimport { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';\nimport { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { extendWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class DetectFaceLandmarksTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected useTinyLandmarkNet: boolean,\n ) {\n super();\n }\n\n protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet {\n return this.useTinyLandmarkNet\n ? nets.faceLandmark68TinyNet\n : nets.faceLandmark68Net;\n }\n}\n\nexport class DetectAllFaceLandmarksTask> extends DetectFaceLandmarksTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults = await this.parentTask;\n const detections = parentResults.map((res) => res.detection);\n const faces: Array = this.input instanceof tf.Tensor\n ? 
await extractFaceTensors(this.input, detections)\n : await extractFaces(this.input, detections);\n const faceLandmarksByFace = await Promise.all(\n faces.map((face) => this.landmarkNet.detectLandmarks(face)),\n ) as FaceLandmarks68[];\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return parentResults.map((parentResult, i) => extendWithFaceLandmarks(parentResult, faceLandmarksByFace[i]));\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class DetectSingleFaceLandmarksTask> extends DetectFaceLandmarksTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n const { detection } = parentResult;\n const faces: Array = this.input instanceof tf.Tensor\n ? await extractFaceTensors(this.input, [detection])\n : await extractFaces(this.input, [detection]);\n const landmarks = await this.landmarkNet.detectLandmarks(faces[0]) as FaceLandmarks68;\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return extendWithFaceLandmarks(parentResult, landmarks);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { TNetInput } from '../dom/index';\nimport { extendWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';\nimport { TinyYolov2Options } from '../tinyYolov2/index';\nimport { ComposableTask } from './ComposableTask';\nimport { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderTask, PredictSingleAgeAndGenderTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionsTask } from './PredictFaceExpressionsTask';\nimport { FaceDetectionOptions } from './types';\n\nexport class DetectFacesTaskBase extends ComposableTask {\n // eslint-disable-next-line no-unused-vars\n constructor(protected input: TNetInput, protected options: FaceDetectionOptions = new SsdMobilenetv1Options()) {\n super();\n }\n}\n\nexport class DetectAllFacesTask extends DetectFacesTaskBase {\n public async run(): Promise {\n const { input, options } = this;\n let result;\n if (options instanceof TinyFaceDetectorOptions) result = nets.tinyFaceDetector.locateFaces(input, options);\n else if (options instanceof SsdMobilenetv1Options) result = nets.ssdMobilenetv1.locateFaces(input, options);\n else if (options instanceof TinyYolov2Options) result = nets.tinyYolov2.locateFaces(input, options);\n else throw new Error('detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options');\n return result;\n }\n\n private 
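The `useTinyLandmarkNet` flag above is what the boolean argument of `withFaceLandmarks` selects. A sketch, assuming the landmark model has been loaded and `result.landmarks.positions` exposes the 68 points as in face-api's `FaceLandmarks` class:

```ts
declare const input: HTMLImageElement;

const result = await faceapi
  .detectSingleFace(input)
  .withFaceLandmarks(true); // true selects the tiny landmark model

if (result) {
  console.log(result.landmarks.positions.length); // 68 landmark points
  console.log(result.landmarks.positions[30]); // e.g. roughly the nose tip
}
```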
runAndExtendWithFaceDetections(): Promise[]> {\n return new Promise[]>((resolve, reject) => {\n this.run()\n .then((detections) => resolve(detections.map((detection) => extendWithFaceDetection({}, detection))))\n .catch((err) => reject(err));\n });\n }\n\n withFaceLandmarks(useTinyLandmarkNet: boolean = false) {\n return new DetectAllFaceLandmarksTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n useTinyLandmarkNet,\n );\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n );\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n );\n }\n}\n\nexport class DetectSingleFaceTask extends DetectFacesTaskBase {\n public async run(): Promise {\n const faceDetections = await new DetectAllFacesTask(this.input, this.options);\n let faceDetectionWithHighestScore = faceDetections[0];\n faceDetections.forEach((faceDetection) => {\n if (faceDetection.score > faceDetectionWithHighestScore.score) faceDetectionWithHighestScore = faceDetection;\n });\n return faceDetectionWithHighestScore;\n }\n\n private runAndExtendWithFaceDetection(): Promise | undefined> {\n // eslint-disable-next-line no-async-promise-executor\n return new Promise | undefined>(async (resolve) => {\n const detection = await this.run();\n resolve(detection ? extendWithFaceDetection<{}>({}, detection) : undefined);\n });\n }\n\n withFaceLandmarks(useTinyLandmarkNet: boolean = false) {\n return new DetectSingleFaceLandmarksTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n useTinyLandmarkNet,\n );\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n );\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n );\n }\n}\n", "import { TNetInput } from '../dom/index';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { DetectAllFacesTask, DetectSingleFaceTask } from './DetectFacesTasks';\nimport { FaceDetectionOptions } from './types';\n\nexport function detectSingleFace(input: TNetInput, options: FaceDetectionOptions = new SsdMobilenetv1Options()): DetectSingleFaceTask {\n return new DetectSingleFaceTask(input, options);\n}\n\nexport function detectAllFaces(input: TNetInput, options: FaceDetectionOptions = new SsdMobilenetv1Options()): DetectAllFacesTask {\n return new DetectAllFacesTask(input, options);\n}\n", "import { TNetInput } from '../dom/index';\nimport { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories/index';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/index';\nimport { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2/index';\nimport { detectAllFaces } from './detectFaces';\n\nexport async function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise>>[]> {\n return detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? 
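The task classes above compose into the fluent API most applications use; each `with*` step awaits its parent task, so a full pipeline reads as one expression:

```ts
declare const input: HTMLImageElement;

const results = await faceapi
  .detectAllFaces(input, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 }))
  .withFaceLandmarks()
  .withFaceExpressions()
  .withAgeAndGender()
  .withFaceDescriptors();

console.log(`found ${results.length} faces`);
```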
{ minConfidence } : {}))\n .withFaceLandmarks()\n .withFaceDescriptors();\n}\n\nexport async function allFacesTinyYolov2(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise>>[]> {\n return detectAllFaces(input, new TinyYolov2Options(forwardParams))\n .withFaceLandmarks()\n .withFaceDescriptors();\n}\n\nexport const allFaces = allFacesSsdMobilenetv1;\n", "export function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array) {\n if (arr1.length !== arr2.length) throw new Error('euclideanDistance: arr1.length !== arr2.length');\n\n const desc1 = Array.from(arr1);\n const desc2 = Array.from(arr2);\n\n return Math.sqrt(\n desc1\n .map((val, i) => val - desc2[i])\n .reduce((res, diff) => res + (diff ** 2), 0),\n );\n}\n", "import { FaceMatch } from '../classes/FaceMatch';\nimport { LabeledFaceDescriptors } from '../classes/LabeledFaceDescriptors';\nimport { euclideanDistance } from '../euclideanDistance';\nimport { WithFaceDescriptor } from '../factories/index';\n\nexport class FaceMatcher {\n private _labeledDescriptors: LabeledFaceDescriptors[]\n\n private _distanceThreshold: number\n\n constructor(\n inputs: LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>,\n distanceThreshold: number = 0.6,\n ) {\n this._distanceThreshold = distanceThreshold;\n\n const inputArray = Array.isArray(inputs) ? inputs : [inputs];\n\n if (!inputArray.length) {\n throw new Error('FaceRecognizer.constructor - expected at least one input');\n }\n\n let count = 1;\n const createUniqueLabel = () => `person ${count++}`;\n\n this._labeledDescriptors = inputArray.map((desc) => {\n if (desc instanceof LabeledFaceDescriptors) {\n return desc;\n }\n\n if (desc instanceof Float32Array) {\n return new LabeledFaceDescriptors(createUniqueLabel(), [desc]);\n }\n\n if (desc.descriptor && desc.descriptor instanceof Float32Array) {\n return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor]);\n }\n\n throw new Error('FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>');\n });\n }\n\n public get labeledDescriptors(): LabeledFaceDescriptors[] { return this._labeledDescriptors; }\n\n public get distanceThreshold(): number { return this._distanceThreshold; }\n\n public computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number {\n return descriptors\n .map((d) => euclideanDistance(d, queryDescriptor))\n .reduce((d1, d2) => d1 + d2, 0)\n / (descriptors.length || 1);\n }\n\n public matchDescriptor(queryDescriptor: Float32Array): FaceMatch {\n return this.labeledDescriptors\n .map(({ descriptors, label }) => new FaceMatch(\n label,\n this.computeMeanDistance(queryDescriptor, descriptors),\n ))\n .reduce((best, curr) => (best.distance < curr.distance ? best : curr));\n }\n\n public findBestMatch(queryDescriptor: Float32Array): FaceMatch {\n const bestMatch = this.matchDescriptor(queryDescriptor);\n return bestMatch.distance < this.distanceThreshold\n ?
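`FaceMatcher` wraps the mean-distance computation and the threshold into a small enrollment/query API. A sketch, with `aliceDescriptor` and `queryDescriptor` standing in for previously computed 128-entry descriptors:

```ts
declare const aliceDescriptor: Float32Array; // from computeFaceDescriptor
declare const queryDescriptor: Float32Array;

const labeled = new faceapi.LabeledFaceDescriptors('alice', [aliceDescriptor]);
const matcher = new faceapi.FaceMatcher([labeled], 0.6);

const best = matcher.findBestMatch(queryDescriptor);
console.log(`${best.label} (distance ${best.distance.toFixed(2)})`); // label is 'unknown' above 0.6
```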
bestMatch\n : new FaceMatch('unknown', bestMatch.distance);\n }\n\n public toJSON(): any {\n return {\n distanceThreshold: this.distanceThreshold,\n labeledDescriptors: this.labeledDescriptors.map((ld) => ld.toJSON()),\n };\n }\n\n public static fromJSON(json: any): FaceMatcher {\n const labeledDescriptors = json.labeledDescriptors\n .map((ld: any) => LabeledFaceDescriptors.fromJSON(ld));\n return new FaceMatcher(labeledDescriptors, json.distanceThreshold);\n }\n}\n", "import { TinyFaceDetector } from './TinyFaceDetector';\n\nexport * from './TinyFaceDetector';\nexport * from './TinyFaceDetectorOptions';\n\nexport function createTinyFaceDetector(weights: Float32Array) {\n const net = new TinyFaceDetector();\n net.extractWeights(weights);\n return net;\n}\n", "import { Dimensions, IDimensions } from './classes/index';\nimport { FaceDetection } from './classes/FaceDetection';\nimport { FaceLandmarks } from './classes/FaceLandmarks';\nimport { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection';\nimport { extendWithFaceLandmarks, isWithFaceLandmarks } from './factories/WithFaceLandmarks';\n\nexport function resizeResults(results: T, dimensions: IDimensions): T {\n const { width, height } = new Dimensions(dimensions.width, dimensions.height);\n\n if (width <= 0 || height <= 0) {\n throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`);\n }\n\n if (Array.isArray(results)) {\n // return results.map(obj => resizeResults(obj, { width, height })) as any as T\n return (results as Array).map((obj) => resizeResults(obj, { width, height } as IDimensions)) as any as T;\n }\n\n if (isWithFaceLandmarks(results)) {\n const resizedDetection = results.detection.forSize(width, height);\n const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height);\n return extendWithFaceLandmarks(extendWithFaceDetection(results, resizedDetection), resizedLandmarks);\n }\n\n if (isWithFaceDetection(results)) {\n return extendWithFaceDetection(results, results.detection.forSize(width, height));\n }\n\n if (results instanceof FaceLandmarks || results instanceof FaceDetection) {\n return (results as any).forSize(width, height);\n }\n\n return results;\n}\n", "import * as tf from '../dist/tfjs.esm';\nimport * as draw from './draw/index';\nimport * as utils from './utils/index';\nimport * as pkg from '../package.json';\n\nexport { tf, draw, utils };\n\nexport * from './ageGenderNet/index';\nexport * from './classes/index';\nexport * from './dom/index';\nexport * from './env/index';\nexport * from './faceExpressionNet/index';\nexport * from './faceLandmarkNet/index';\nexport * from './faceRecognitionNet/index';\nexport * from './factories/index';\nexport * from './globalApi/index';\nexport * from './ops/index';\nexport * from './ssdMobilenetv1/index';\nexport * from './tinyFaceDetector/index';\nexport * from './tinyYolov2/index';\nexport * from './euclideanDistance';\nexport * from './NeuralNetwork';\nexport * from './resizeResults';\n\nconst node = (typeof process !== 'undefined');\nconst browser = (typeof navigator !== 'undefined') && (typeof navigator.userAgent !== 'undefined');\nexport const version = { faceapi: pkg.version as string, node, browser };\n"], - "mappings": 
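Because detections are expressed against the dimensions the network saw, `resizeResults` is the usual last step before rendering. A sketch, assuming an overlay canvas sized to the displayed media and that the exported `draw` namespace provides `drawDetections`/`drawFaceLandmarks` helpers as in face-api:

```ts
declare const results: faceapi.FaceDetection[]; // output of any detection pipeline

const canvas = document.getElementById('overlay') as HTMLCanvasElement;
const displaySize = { width: canvas.width, height: canvas.height };

const resized = faceapi.resizeResults(results, displaySize);
faceapi.draw.drawDetections(canvas, resized);
```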
";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AACA;AADA;AACA;;;ACNA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACEO,qBACL,KACA,QACA,WAAoB,OACpB;AACA,MAAI;AAEJ,SAAO,MAAM,GAAG,QAAQ,CAAC,CAAE,GAAG,IAAK,YAAY;AAC7C,UAAM,OAAO,OAAO;AACpB,QAAI,OAAO,KAAK,GAAG,KAAK;AACxB,QAAI,OAAO,GAAG;AAAA;AAGhB,MAAI,UAAU;AACZ,UAAM,OAAO,OAAO,OAAO,SAAS;AACpC,UAAM,KAAK,OAAO;AAClB,QAAI,CAAC,QAAQ,CAAC,IAAI;AAChB;AAAA;AAGF,QAAI,OAAO,KAAK,GAAG,KAAK;AACxB,QAAI,OAAO,GAAG,GAAG,GAAG;AAAA;AAGtB,MAAI;AAAA;;;AC1BN;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACOO,uBAAwC;AAAA,EAK7C,YAAY,OAAe,QAAgB;AACzC,QAAI,CAAC,cAAc,UAAU,CAAC,cAAc,SAAS;AACnD,YAAM,IAAI,MAAM,wFAAwF,KAAK,UAAU,CAAE,OAAO;AAAA;AAGlI,SAAK,SAAS;AACd,SAAK,UAAU;AAAA;AAAA,MAGN,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,SAAiB;AAAE,WAAO,KAAK;AAAA;AAAA,EAEnC,UAAsB;AAC3B,WAAO,IAAI,WAAW,IAAI,KAAK,OAAO,IAAI,KAAK;AAAA;AAAA;;;ADrB5C,kBAAkB,SAAa,KAAa;AACjD,SAAO,mBAAqB,2BAAU,QAAO,MAAM,WAAW;AAAA;AAGzD,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,iBAAiB,KAAa;AACnC,SAAO,MAAM,MAAM;AAAA;AAGd,gBAAgB,KAAa;AAClC,SAAO,MAAM,MAAM;AAAA;AAGd,eAAe,KAAa,OAAe,GAAG;AACnD,QAAM,IAAI,MAAM;AAChB,SAAO,KAAK,MAAM,MAAM,KAAK;AAAA;AAGxB,sBAAsB,KAAmB;AAC9C,SAAO,OAAO,IAAI,SAAS,IAAI;AAAA;AAG1B,mCAAmC,CAAE,OAAO,SAAuB,WAAmB;AAC3F,QAAM,SAAQ,YAAY,KAAK,IAAI,QAAQ;AAC3C,SAAO,IAAI,WAAW,KAAK,MAAM,QAAQ,SAAQ,KAAK,MAAM,SAAS;AAAA;AAGhE,wBAAwB,KAAqB;AAClD,SAAO,IAAI,OAAO,CAAC,KAAK,OAAO,IAAI,IAAI,KAAK,IAAI,MAAM,GAAG,IACtD,IAAI,IAAI,MAAM,IAAI,QAAQ,IAAI;AAAA;AAG5B,eAAe,KAAa,OAAe,MAAwB;AACxE,SAAO,MAAM,KAAK,KAAK,GAAG,IAAI,CAAC,GAAG,MAAM,QAAS,IAAI;AAAA;AAGhD,uBAAuB,KAAU;AACtC,SAAO,CAAC,CAAC,OAAQ,QAAQ,YAAc,QAAQ,aAAc,CAAC,OAAO,MAAM,QAAQ,QAAQ;AAAA;AAGtF,4BAA4B,KAAU;AAC3C,SAAO,cAAc,QAAQ,OAAO,KAAK,OAAO;AAAA;;;AExD3C,kBAA8B;AAAA,EAKnC,YAAY,GAAW,GAAW;AAChC,SAAK,KAAK;AACV,SAAK,KAAK;AAAA;AAAA,MAGR,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,MAE1B,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,EAEvB,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,MAAa;AAClB,WAAO,IAAI,MAAM,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,KAAK;AAAA;AAAA,EAG5C,YAAoB;AACzB,WAAO,KAAK,KAAM,KAAK,KAAK,IAAM,KAAK,KAAK;AAAA;AAAA,EAGvC,QAAe;AACpB,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK,IAAI,KAAK,MAAM,KAAK;AAAA;AAAA;;;ACtClD,gBAAwD;AAAA,SAC/C,OAAO,MAAoB;AACvC,WAAO,CAAC,CAAC,QAAQ,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAAQ,MAAM;AAAA;AAAA,SAGrD,iBAAiB,KAAU,QAAgB,0BAAmC,OAAO;AACjG,QAAI,CAAC,IAAI,OAAO,MAAM;AACpB,YAAM,IAAI,MAAM,GAAG,yBAAyB,KAAK,UAAU;AAAA;AAG7D,QAAI,CAAC,2BAA4B,KAAI,QAAQ,KAAK,IAAI,SAAS,IAAI;AACjE,YAAM,IAAI,MAAM,GAAG,mBAAmB,IAAI,sBAAsB,IAAI;AAAA;AAAA;AAAA,EAYxE,YAAY,MAA4B,0BAAmC,MAAM;AAC/E,UAAM,MAAO,QAAQ;AAErB,UAAM,SAAS,CAAC,IAAI,MAAM,IAAI,KAAK,IAAI,OAAO,IAAI,QAAQ,MAAM;AAChE,UAAM,SAAS,CAAC,IAAI,GAAG,IAAI,GAAG,IAAI,OAAO,IAAI,QAAQ,MAAM;AAE3D,QAAI,CAAC,UAAU,CAAC,QAAQ;AACtB,YAAM,IAAI,MAAM,2EAA2E,KAAK,UAAU;AAAA;AAG5G,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,SAC1B,CAAC,IAAI,GAAG,IAAI,GAAG,IAAI,OAAO,IAAI,UAC9B,CAAC,IAAI,MAAM,IAAI,KAAK,IAAI,QAAQ,IAAI,MAAM,IAAI,SAAS,IAAI;AAE/D,QAAI,iBAAiB;AAAA,MACnB;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,OACZ,mBAAmB;AAEtB,SAAK,KAAK;AACV,SAAK,KAAK;AACV,SAAK,SAAS;AACd,SAAK,UAAU;AAAA;AAAA,MAGN,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,MAE1B,IAAY;AAAE,WAAO,KAAK;AAAA;AA
AA,MAE1B,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,SAAiB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE/B,OAAe;AAAE,WAAO,KAAK;AAAA;AAAA,MAE7B,MAAc;AAAE,WAAO,KAAK;AAAA;AAAA,MAE5B,QAAgB;AAAE,WAAO,KAAK,IAAI,KAAK;AAAA;AAAA,MAEvC,SAAiB;AAAE,WAAO,KAAK,IAAI,KAAK;AAAA;AAAA,MAExC,OAAe;AAAE,WAAO,KAAK,QAAQ,KAAK;AAAA;AAAA,MAE1C,UAAiB;AAAE,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK;AAAA;AAAA,MAEpD,WAAkB;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,KAAK;AAAA;AAAA,MAEtD,aAAoB;AAAE,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK;AAAA;AAAA,MAEvD,cAAqB;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,KAAK;AAAA;AAAA,EAE7D,QAAsB;AAC3B,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAC7D,IAAI,CAAC,QAAQ,KAAK,MAAM;AAC3B,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,QAAsB;AAC3B,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAC7D,IAAI,CAAC,QAAQ,KAAK,MAAM;AAC3B,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,WAAyB;AAC9B,QAAI;AAAA,MACF;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,QACX;AACJ,UAAM,OAAO,KAAK,IAAI,QAAQ;AAC9B,QAAI,QAAQ,QAAQ;AAClB,WAAM,OAAO;AACb,eAAS;AAAA;AAEX,QAAI,SAAS,OAAO;AAClB,WAAM,OAAO;AACb,gBAAU;AAAA;AAGZ,WAAO,IAAI,IAAI,CAAE,GAAG,GAAG,OAAO;AAAA;AAAA,EAGzB,QAAQ,GAAuC;AACpD,UAAM,SAAS,aAAa,KAAM,EAAkB,QAAQ;AAC5D,UAAM,SAAS,aAAa,KAAM,EAAkB,SAAS;AAC7D,WAAO,IAAI,IAAI;AAAA,MACb,GAAG,KAAK,IAAI;AAAA,MACZ,GAAG,KAAK,IAAI;AAAA,MACZ,OAAO,KAAK,QAAQ;AAAA,MACpB,QAAQ,KAAK,SAAS;AAAA;AAAA;AAAA,EAInB,IAAI,MAAc,MAA4B;AACnD,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU;AAAA,MAC5B,KAAK,IAAK,OAAO;AAAA,MACjB,KAAK,IAAK,OAAO;AAAA,MACjB,KAAK,QAAQ;AAAA,MACb,KAAK,SAAS;AAAA;AAEhB,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,mBAAmB,UAAkB,WAAiC;AAC3E,UAAM,CAAE,GAAG,GAAG,OAAO,UAAW;AAChC,UAAM,WAAW,KAAK,IAAI,GAAG;AAC7B,UAAM,WAAW,KAAK,IAAI,GAAG;AAE7B,UAAM,WAAW,QAAQ;AACzB,UAAM,YAAY,SAAS;AAC3B,UAAM,eAAe,KAAK,IAAI,UAAU,WAAW;AACnD,UAAM,gBAAgB,KAAK,IAAI,WAAW,YAAY;AAEtD,WAAQ,IAAI,IAAI;AAAA,MACd,GAAG;AAAA,MAAU,GAAG;AAAA,MAAU,OAAO;AAAA,MAAc,QAAQ;AAAA,OACrD;AAAA;AAAA,EAGC,MAAM,IAAY,IAA0B;AACjD,UAAM,CAAE,OAAO,UAAW;AAC1B,UAAM,IAAI,KAAK,IAAI;AACnB,UAAM,IAAI,KAAK,IAAI;AAEnB,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,aAAa,aAAqB,YAAoB;AAC3D,UAAM,IAAI,KAAK,QAAQ;AACvB,UAAM,IAAI,KAAK,SAAS;AAExB,UAAM,KAAK;AACX,UAAM,KAAK;AACX,QAAI,MAAM;AACV,QAAI,MAAM;AAEV,QAAI,IAAI,KAAK;AACb,QAAI,IAAI,KAAK;AACb,QAAI,KAAK,KAAK;AACd,QAAI,KAAK,KAAK;AAEd,QAAI,KAAK,YAAY;AACnB,YAAM,CAAC,KAAK,aAAa;AACzB,WAAK;AAAA;AAEP,QAAI,KAAK,aAAa;AACpB,YAAM,CAAC,KAAK,cAAc;AAC1B,WAAK;AAAA;AAEP,QAAI,IAAI,GAAG;AACT,YAAM,IAAI;AACV,UAAI;AAAA;AAEN,QAAI,IAAI,GAAG;AACT,YAAM,IAAI;AACV,UAAI;AAAA;AAGN,WAAO;AAAA,MACL;AAAA,MAAI;AAAA,MAAK;AAAA,MAAI;AAAA,MAAK;AAAA,MAAG;AAAA,MAAI;AAAA,MAAG;AAAA,MAAI;AAAA,MAAG;AAAA;AAAA;AAAA,EAIhC,UAAU,QAAa;AAC5B,WAAO,IAAI,IAAI;AAAA,MACb,MAAM,KAAK,OAAQ,OAAO,OAAO,KAAK;AAAA,MACtC,KAAK,KAAK,MAAO,OAAO,MAAM,KAAK;AAAA,MACnC,OAAO,KAAK,QAAS,OAAO,QAAQ,KAAK;AAAA,MACzC,QAAQ,KAAK,SAAU,OAAO,SAAS,KAAK;AAAA,OAC3C,WAAW;AAAA;AAAA;;;ACjMX,gCAA0B,IAAyC;AAAA,EACxE,YAAY,MAAc,KAAa,OAAe,QAAgB,0BAAmC,OAAO;AAC9G,UAAM;AAAA,MACJ;AAAA,MAAM;AAAA,MAAK;AAAA,MAAO;AAAA,OACjB;AAAA;AAAA;;;ACTA,4BAAsB;AAAA,EAW3B,YACE,OACA,YACA,WACA,aACA,WACA;AACA,SAAK,aAAa,IAAI,WAAW,UAAU,OAAO,UAAU;AAC5D,SAAK,SAAS;AACd,SAAK,cAAc;AACnB,SAAK,aAAa;AAClB,SAAK,OAAO,IAAI,IAAI,aAAa,QAAQ,KAAK;AAAA;AAAA,MAGrC,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEnC,YAAoB;AAAE,WAAO,KAAK;AAAA;AAAA,MAElC,MAAW;AAAE,WAAO,KAAK;AAAA;AAAA,MAEzB,YAAwB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEtC,aAAqB;AAAE,WAAO,KAAK,UAAU;AAAA;AAAA,MAE7C,cAAsB;AAAE,WAAO,KAAK,UAAU;AAAA;AAAA,MAE9C,cAAmB;AAAE,WAAO,IAA
I,IAAI,KAAK,MAAM,QAAQ,KAAK,UAAU;AAAA;AAAA,EAE1E,QAAQ,OAAe,QAAiC;AAC7D,WAAO,IAAI,gBACT,KAAK,OACL,KAAK,YACL,KAAK,WACL,KAAK,aACL,CAAE,OAAO;AAAA;AAAA;;;ACzCR,kCAA4B,gBAAyC;AAAA,EAC1E,YACE,OACA,aACA,WACA;AACA,UAAM,OAAO,OAAO,IAAI,aAAa;AAAA;AAAA,EAGhC,QAAQ,OAAe,QAA+B;AAC3D,UAAM,CAAE,OAAO,aAAa,aAAc,MAAM,QAAQ,OAAO;AAC/D,WAAO,IAAI,cAAc,OAAO,aAAa;AAAA;AAAA;;;ACnB1C,aAAa,MAAW,MAAW,QAAiB,MAAM;AAC/D,QAAM,QAAQ,KAAK,IAAI,GAAK,KAAK,IAAI,KAAK,OAAO,KAAK,SAAS,KAAK,IAAI,KAAK,MAAM,KAAK;AACxF,QAAM,SAAS,KAAK,IAAI,GAAK,KAAK,IAAI,KAAK,QAAQ,KAAK,UAAU,KAAK,IAAI,KAAK,KAAK,KAAK;AAC1F,QAAM,eAAe,QAAQ;AAE7B,SAAO,QACH,eAAgB,MAAK,OAAO,KAAK,OAAO,gBACxC,eAAe,KAAK,IAAI,KAAK,MAAM,KAAK;AAAA;;;ACPvC,iBAAiB,KAA4B;AAClD,QAAM,KAAK,IAAI,IAAI,CAAC,OAAO,GAAG;AAC9B,QAAM,KAAK,IAAI,IAAI,CAAC,OAAO,GAAG;AAC9B,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,IAAI,MAAM,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,IAAI,MAAM,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,MAAM,IAAI,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,MAAM,IAAI,IAAI,KAAM;AAExD,SAAO,IAAI,YAAY,MAAM,MAAM,MAAM;AAAA;;;ACPpC,2BACL,OACA,QACA,cACA,QAAiB,MACP;AACV,MAAI,uBAAuB,OACxB,IAAI,CAAC,OAAO,aAAc,EAAE,OAAO,YACnC,KAAK,CAAC,IAAI,OAAO,GAAG,QAAQ,GAAG,OAC/B,IAAI,CAAC,MAAM,EAAE;AAEhB,QAAM,OAAiB;AAEvB,SAAO,qBAAqB,SAAS,GAAG;AACtC,UAAM,OAAO,qBAAqB;AAClC,SAAK,KAAK;AAEV,UAAM,UAAU;AAEhB,UAAM,UAAoB;AAC1B,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,MAAM,QAAQ;AAEpB,YAAM,UAAU,MAAM;AACtB,YAAM,SAAS,MAAM;AAErB,cAAQ,KAAK,IAAI,SAAS,QAAQ;AAAA;AAGpC,2BAAuB,qBAAqB,OAC1C,CAAC,GAAG,MAAM,QAAQ,MAAM;AAAA;AAI5B,SAAO;AAAA;;;ACnCF,mBAAmB,GAAgB,SAAgC;AACxE,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,CAAC,GAAG,GAAG,KAAK;AAClB,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,UAAU,AAAG,wBAAO,CAAC,OAAO,OAAO,QAAQ;AAEjD,WAAO,AAAG,qBAAI,GAAG;AAAA;AAAA;;;ACAd,qBACL,WACA,gBAAyB,OACZ;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,CAAC,QAAQ,SAAS,UAAU,MAAM,MAAM;AAC9C,QAAI,WAAW,OAAO;AACpB,aAAO;AAAA;AAGT,UAAM,UAAU,KAAK,IAAI,SAAS;AAClC,UAAM,gBAAgB,KAAK,MAAM,UAAW,iBAAgB,MAAM;AAClE,UAAM,cAAc,SAAS,QAAQ,IAAI;AAEzC,UAAM,sBAAsB,CAAC,uBAA0C;AACrE,YAAM,qBAAqB,UAAU,MAAM;AAC3C,yBAAmB,eAAe;AAClC,aAAO,AAAG,sBAAK,oBAAoB,GAAG;AAAA;AAGxC,UAAM,sBAAsB,oBAAoB;AAChD,UAAM,yBAAyB,UAAW,oBAAoB,MAAM;AAEpE,UAAM,uBAAuB,iBAAiB,yBAC1C,oBAAoB,0BACpB;AAEJ,UAAM,iBAAiB;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MAEC,OAAO,CAAC,MAAM,CAAC,CAAC,GAChB,IAAI,CAAC,MAAiB,AAAG,sBAAK,GAAG;AACpC,WAAO,AAAG,wBAAO,gBAAgB;AAAA;AAAA;;;AC5C9B,sBAAsB,YAAmB;AAC9C,QAAM,QAAQ,WAAW;AACzB,WAAS,IAAI,MAAM,SAAS,GAAG,IAAI,GAAG,KAAK;AACzC,UAAM,IAAI,KAAK,MAAM,KAAK,WAAY,KAAI;AAC1C,UAAM,IAAI,MAAM;AAChB,UAAM,KAAK,MAAM;AACjB,UAAM,KAAK;AAAA;AAEb,SAAO;AAAA;;;ACDF,iBAAiB,GAAW;AACjC,SAAO,IAAK,KAAI,KAAK,IAAI,CAAC;AAAA;AAGrB,wBAAwB,GAAW;AACxC,SAAO,KAAK,IAAI,IAAK,KAAI;AAAA;;;ACHpB,yBAAmB,IAA2B;AAAA,EACnD,YAAY,GAAW,GAAW,OAAe,QAAgB,0BAAmC,OAAO;AACzG,UAAM;AAAA,MACJ;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,OACZ;AAAA;AAAA;;;ACHP,IAAM,OAAO;AACb,IAAM,OAAO;AACb,IAAM,WAAW;AAOV,0BAA8C;AAAA,EAOnD,YACE,+BACA,SACA,QAAe,IAAI,MAAM,GAAG,IAC5B;AACA,UAAM,CAAE,OAAO,UAAW;AAC1B,SAAK,WAAW,IAAI,WAAW,OAAO;AACtC,SAAK,SAAS;AACd,SAAK,aAAa,8BAA8B,IAC9C,CAAC,OAAO,GAAG,IAAI,IAAI,MAAM,OAAO,SAAS,IAAI;AAAA;AAAA,MAItC,QAAe;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,GAAG,KAAK,OAAO;AAAA;AAAA,MAE7D,aAAqB;AAAE,WAAO,KAAK,SAAS;AAAA;AAAA,MAE5C,cAAsB;AAAE,WAAO,KAAK,SAAS;AAAA;AAAA,MAE7C,YAAqB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEnC,oBAA6B;AACtC,WAAO,KAAK,WAAW,IACrB,CAAC,OAAO,GAAG,IAAI,KAAK,QAAQ,IAAI,IAAI,MAAM,KAAK,YAAY,K
AAK;AAAA;AAAA,EAI7D,QAAiC,OAAe,QAAmB;AACxE,WAAO,IAAK,KAAK,YACf,KAAK,mBACL,CAAE,OAAO;AAAA;AAAA,EAIN,QAAiC,GAAW,GAAc;AAC/D,WAAO,IAAK,KAAK,YACf,KAAK,mBACL,KAAK,UACL,IAAI,MAAM,GAAG;AAAA;AAAA,EAIV,aAAsC,IAAc;AACzD,WAAO,KAAK,QAAQ,GAAG,GAAG,GAAG;AAAA;AAAA,EAcxB,MACL,WACA,UAAkE,IAC7D;AACL,QAAI,WAAW;AACb,YAAM,MAAM,qBAAqB,gBAC7B,UAAU,IAAI,UACd,IAAI,IAAI;AAEZ,aAAO,KAAK,QAAQ,IAAI,GAAG,IAAI,GAAG,MAAM,MAAM;AAAA;AAGhD,UAAM,CAAE,kBAAkB,iBAAkB,CAAE,kBAAkB,OAAO,eAAe,QAAQ;AAE9F,QAAI,kBAAkB;AACpB,aAAO,KAAK;AAAA;AAGd,WAAO,KAAK,aAAa;AAAA;AAAA,EAGnB,YAAiB;AACvB,UAAM,UAAU,KAAK;AAErB,UAAM,CAAC,eAAe,gBAAgB,eAAe;AACrD,UAAM,cAAc,CAAC,OAAc,YAAY,IAAI,IAAI;AACvD,UAAM,iBAAkB,aAAY,iBAAiB,YAAY,mBAAmB;AAEpF,UAAM,OAAO,KAAK,MAAM,iBAAiB;AAEzC,UAAM,WAAW,eAAe;AAEhC,UAAM,IAAI,KAAK,MAAM,KAAK,IAAI,GAAG,SAAS,IAAK,OAAO;AACtD,UAAM,IAAI,KAAK,MAAM,KAAK,IAAI,GAAG,SAAS,IAAK,OAAO;AAEtD,WAAO,IAAI,KAAK,GAAG,GAAG,KAAK,IAAI,MAAM,KAAK,aAAa,IAAI,KAAK,IAAI,MAAM,KAAK,cAAc;AAAA;AAAA,EAGvF,aAAa,SAAsB;AACzC,UAAM,MAAM,QAAQ,KAAK;AACzB,WAAO,IAAI,IAAI,IAAI,QAAQ,SAAS,IAAI,SAAS;AAAA;AAAA,EAGzC,2BAAoC;AAC5C,UAAM,IAAI,MAAM;AAAA;AAAA;;;AC3Hb,mCAA6B,cAAc;AAAA,EACtC,2BAAoC;AAC5C,UAAM,MAAM,KAAK;AACjB,WAAO;AAAA,MACL,IAAI;AAAA,MACJ,IAAI;AAAA,MACJ,eAAe,CAAC,IAAI,IAAI,IAAI;AAAA;AAAA;AAAA;;;ACN3B,oCAA8B,cAAc;AAAA,EAC1C,gBAAyB;AAC9B,WAAO,KAAK,UAAU,MAAM,GAAG;AAAA;AAAA,EAG1B,iBAA0B;AAC/B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,kBAA2B;AAChC,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,UAAmB;AACxB,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,aAAsB;AAC3B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,cAAuB;AAC5B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,WAAoB;AACzB,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAGxB,2BAAoC;AAC5C,WAAO;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL,IAAI;AAAA;AAAA;;;AC/BH,sBAAsC;AAAA,EAK3C,YAAY,OAAe,UAAkB;AAC3C,SAAK,SAAS;AACd,SAAK,YAAY;AAAA;AAAA,MAGR,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,WAAmB;AAAE,WAAO,KAAK;AAAA;AAAA,EAErC,SAAS,eAAwB,MAAc;AACpD,WAAO,GAAG,KAAK,QAAQ,eAAe,KAAK,MAAM,KAAK,eAAe;AAAA;AAAA;;;ACjBlE,+BAAyB,IAAgB;AAAA,SAChC,wBAAwB,KAAU,QAAgB;AAC9D,QAAI,iBAAiB,KAAK;AAE1B,QAAI,CAAC,cAAc,IAAI,QAAQ;AAC7B,YAAM,IAAI,MAAM,GAAG,qCAAqC,IAAI;AAAA;AAAA;AAAA,EAMhE,YAAY,KAAiC,OAAe;AAC1D,UAAM;AACN,SAAK,SAAS;AAAA;AAAA,MAGL,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACrBpC,mCAA6B;AAAA,EAKlC,YAAY,OAAe,aAA6B;AACtD,QAAI,CAAE,QAAO,UAAU,WAAW;AAChC,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,CAAC,MAAM,QAAQ,gBAAgB,YAAY,KAAK,CAAC,SAAS,CAAE,iBAAgB,gBAAgB;AAC9F,YAAM,IAAI,MAAM;AAAA;AAGlB,SAAK,SAAS;AACd,SAAK,eAAe;AAAA;AAAA,MAGX,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,cAA8B;AAAE,WAAO,KAAK;AAAA;AAAA,EAEhD,SAAc;AACnB,WAAO;AAAA,MACL,OAAO,KAAK;AAAA,MACZ,aAAa,KAAK,YAAY,IAAI,CAAC,MAAM,MAAM,KAAK;AAAA;AAAA;AAAA,SAI1C,SAAS,MAAmC;AACxD,UAAM,cAAc,KAAK,YAAY,IAAI,CAAC,MAAW,IAAI,aAAa;AACtE,WAAO,IAAI,uBAAuB,KAAK,OAAO;AAAA;AAAA;;;AC1B3C,iCAA2B,WAAW;AAAA,SAC7B,0BAA0B,KAAU,QAAgB;AAChE,eAAW,wBAAwB,KAAK;AAExC,QACE,CAAC,mBAAmB,IAAI,UACrB,CAAC,mBAAmB,IAAI,aAC3B;AACA,YAAM,IAAI,MAAM,GAAG,uCAAuC,IAAI,eAAe,IAAI;AAAA;AAAA;AAAA,EAQrF,YAAY,KAAiC,OAAe,OAAe,YAAoB;AAC7F,UAAM,KAAK;AACX,SAAK,SAAS;AACd,SAAK,cAAc;AAAA;AAAA,MAGV,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACvBzC,6BAA6B,KAAwC;AAC1E,SAAO,IAAI,qBAAqB;AAAA;AAG3B,iCAA0C,WAAoB,WAAsD;AACzH,QAAM,YAAY,CAAE;AACpB,SAAO,IAAK,cAAc;AAAA;;;ACVrB,4BAAyC;AAC9C,QAAM,QAAQ,OAAO;AACrB,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM;AAE5B,QAAM,WAAW,MAAM;AACrB,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO;AAAA,IACL,QAAQ;AAAA,IACR;AAAA,IACA,OAAO;AAAA,IACP;AAAA,IACA,OAAO;AAAA,IACP,qBAAqB,MAAM,SAAS,cAAc;AAAA,IAClD,oBAAoB,MAAM,SAAS,cAAc;AAAA,IACjD;AAAA,IACA;AAAA;AAAA;;;ACjBG,0BAA0B,IAAsB;AACrD,MAAI,iBAAiB;AAEr
B,MAAI,CAAC,IAAI;AACP,QAAI;AAEF,WAAK,UAAQ;AAAA,aACN,KAAP;AACA,uBAAiB,IAAI;AAAA;AAAA;AAIzB,QAAM,WAAW,KACb,CAAC,aAAqB,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC/D,OAAG,SAAS,UAAU,CAAC,KAAU,WAAoB,MAAM,OAAO,OAAO,QAAQ;AAAA,OAEjF,MAAM;AACN,UAAM,IAAI,MAAM,qEAAqE;AAAA;AAGzF,SAAO;AAAA,IACL;AAAA;AAAA;;;ACnBG,2BAAwC;AAE7C,QAAM,SAAS,OAAO,aAAa,OAAO;AAC1C,QAAM,QAAQ,OAAO,SAAS,OAAO;AAErC,QAAM,sBAAsB,MAAM;AAChC,QAAI;AAAQ,aAAO,IAAI;AACvB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,qBAAqB,MAAM;AAC/B,QAAI;AAAO,aAAO,IAAI;AACtB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,QAAQ,OAAO;AAGrB,QAAM,aAAa;AAEnB,SAAO;AAAA,IACL,QAAQ,UAAU,MAAM;AAAA;AAAA,IACxB,0BAA0B,OAAO,4BAA4B,MAAM;AAAA;AAAA,IACnE,OAAO,SAAS,MAAM;AAAA;AAAA,IACtB,WAAW,OAAO,aAAa,MAAM;AAAA;AAAA,IACrC,OAAO,OAAO,oBAAoB,MAAM;AAAA;AAAA,IACxC;AAAA,IACA;AAAA,IACA;AAAA,OACG;AAAA;AAAA;;;ACjCA,qBAA8B;AACnC,SAAO,OAAO,WAAW,YACpB,OAAO,aAAa,eACpB,OAAO,qBAAqB,eAC5B,OAAO,sBAAsB,eAC7B,OAAO,qBAAqB,eAC5B,OAAO,cAAc,eACrB,OAAO,6BAA6B;AAAA;;;ACPpC,oBAA6B;AAClC,SAAO,OAAO,WAAW,YACpB,OAAO,cAAY,cACnB,OAAO,WAAW,eAClB,OAAO,YAAY,eAAe,CAAC,CAAC,QAAQ;AAAA;;;ACGnD,IAAI;AAEJ,kBAA+B;AAC7B,MAAI,CAAC,aAAa;AAChB,UAAM,IAAI,MAAM;AAAA;AAElB,SAAO;AAAA;AAGT,gBAAgB,MAAkB;AAChC,gBAAc;AAAA;AAGhB,sBAAsB;AAGpB,MAAI;AAAa,WAAO,OAAO;AAC/B,MAAI;AAAY,WAAO,OAAO;AAC9B,SAAO;AAAA;AAGT,qBAAqB,MAA2B;AAC9C,MAAI,CAAC,aAAa;AAChB;AAAA;AAGF,MAAI,CAAC,aAAa;AAChB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,CAAE,SAAS,YAAY,QAAQ,QAAQ,YAAY,SAAU;AACnE,cAAY,SAAS;AACrB,cAAY,QAAQ;AACpB,cAAY,sBAAsB,KAAI,uBAAwB,OAAM,IAAI;AACxE,cAAY,qBAAqB,KAAI,sBAAuB,OAAM,IAAI;AAEtE,cAAY,YAAY,KAAI,aAAa,YAAY;AACrD,cAAY,QAAQ,KAAI,SAAS,YAAY;AAC7C,cAAY,QAAQ,KAAI,SAAS,YAAY;AAC7C,cAAY,WAAW,KAAI,YAAY,YAAY;AAAA;AAG9C,IAAM,MAAM;AAAA,EACjB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAGF;;;AC3DO,sBAAsB,KAAmB;AAC9C,MAAI,CAAC,IAAI,cAAc,OAAO,QAAQ,UAAU;AAC9C,WAAO,SAAS,eAAe;AAAA;AAEjC,SAAO;AAAA;;;ACHF,6BAA6B,WAA4F;AAC9H,QAAM,CAAE,QAAQ,uDAA6B,IAAI;AAEjD,MAAI,qBAAqB,2BAA0B;AACjD,WAAO;AAAA;AAGT,QAAM,SAAS,aAAa;AAE5B,MAAI,CAAE,mBAAkB,SAAS;AAC/B,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,MAAM,OAAO,WAAW;AAC9B,MAAI,CAAC,KAAK;AACR,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO;AAAA;;;ACfF,IAAK;AAAL,UAAK,iBAAL;AAEL,gCAAW;AAEX,iCAAY;AAEZ,mCAAc;AAEd,oCAAe;AAAA,GARL;AAoBL,iCAA4D;AAAA,EAajE,YAAY,UAAiC,IAAI;AAC/C,UAAM;AAAA,MACJ;AAAA,MAAgB;AAAA,MAAiB;AAAA,MAAW;AAAA,MAAU;AAAA,MAAW;AAAA,QAC/D;AACJ,SAAK,iBAAiB,kBAAkB,eAAe;AACvD,SAAK,kBAAkB,mBAAmB;AAC1C,SAAK,YAAY,aAAa;AAC9B,SAAK,WAAW,YAAY;AAC5B,SAAK,YAAY,aAAa;AAC9B,SAAK,UAAU,WAAW;AAAA;AAAA;AAIvB,0BAAoB;AAAA,EAOzB,YACE,MACA,QACA,UAAiC,IACjC;AAEA,SAAK,OAAO,OAAO,SAAS,WACxB,CAAC,QACA,gBAAgB,gBAAgB,KAAK,OAAO;AACjD,SAAK,SAAS;AACd,SAAK,UAAU,IAAI,qBAAqB;AAAA;AAAA,EAG1C,aAAa,KAAuC;AAClD,UAAM,CAAE,WAAY,KAAK;AACzB,WAAO,KAAK,KAAK,IAAI,CAAC,MAAM,IAAI,YAAY,GAAG,OAAO,OAAO,CAAC,IAAI,OAAQ,KAAK,KAAK,KAAK,IAAK,KAAM,IAAI;AAAA;AAAA,EAG1G,gBAAwB;AACtB,UAAM,CAAE,UAAU,WAAY,KAAK;AACnC,WAAO,KAAK,KAAK,SAAS,WAAY,IAAI;AAAA;AAAA,EAG5C,aAAa,KAA+B,YAAkC;AAC5E,UAAM,CAAE,kBAAmB,KAAK;AAChC,UAAM,cAAc,mBAAmB,eAAe,gBAAgB,mBAAmB,eAAe;AACxG,UAAM,aAAa,mBAAmB,eAAe,eAAe,mBAAmB,eAAe;AAEtG,UAAM,iBAAiB,KAAK,aAAa;AACzC,UAAM,kBAAkB,KAAK;AAC7B,UAAM,IAAK,cAAc,KAAK,OAAO,IAAI,iBAAiB,KAAK,OAAO;AACtE,UAAM,IAAI,aAAa,KAAK,OAAO,IAAI,kBAAkB,KAAK,OAAO;AAGrE,QAAI,YAAY;AACd,YAAM,CAAE,OAAO,UAAW;AAC1B,YAAM,OAAO,KAAK,IAAI,KAAK,IAAI,GAAG,QAAQ,iBAAiB;AAC3D,YAAM,OAAO,KAAK,IAAI,KAAK,IAAI,GAAG,SAAS,kBAAkB;AAC7D,aAAO,CAAE,GAAG,MAAM,GAAG;AAAA;AAEvB,WAAO,CAAE,GAAG;AAAA;AAAA,EAGd,KAAK,WAAkE;AACrE,UAAM,SAAS,aAAa;AAC5B,UAAM,MAAM,oBAAoB;AAEhC,UAAM;AAAA,MACJ;AAAA,MAAiB;AAAA,MAAW;AAAA,MAAU;AAAA,MAAW;AAAA,QAC/C,KAAK;AAET,QAAI,OAAO,GAAG,cAAc;AAC5B,UAA
M,eAAe,KAAK,aAAa;AACvC,UAAM,aAAa,KAAK;AAExB,QAAI,YAAY;AAChB,UAAM,YAAY,KAAK,aAAa,KAAK;AACzC,QAAI,SAAS,UAAU,GAAG,UAAU,GAAG,cAAc;AAErD,QAAI,YAAY;AAChB,SAAK,KAAK,QAAQ,CAAC,UAAU,MAAM;AACjC,YAAM,IAAI,UAAU,UAAU;AAC9B,YAAM,IAAI,UAAU,UAAU,IAAM,KAAI,KAAK;AAC7C,UAAI,SAAS,UAAU,GAAG;AAAA;AAAA;AAAA;;;AC9GzB,2BAAqB;AAAA,EAS1B,YAAY,UAA2B,IAAI;AACzC,UAAM;AAAA,MACJ;AAAA,MAAU;AAAA,MAAW;AAAA,MAAO;AAAA,QAC1B;AACJ,SAAK,WAAW,YAAY;AAC5B,SAAK,YAAY,aAAa;AAC9B,SAAK,QAAQ;AAEb,UAAM,0BAA0B;AAAA,MAC9B,gBAAgB,eAAe;AAAA,MAC/B,iBAAiB,KAAK;AAAA;AAExB,SAAK,mBAAmB,IAAI,qBAAqB,IAAK,4BAA4B;AAAA;AAAA;AAI/E,oBAAc;AAAA,EAKnB,YACE,KACA,UAA2B,IAC3B;AACA,SAAK,MAAM,IAAI,IAAI;AACnB,SAAK,UAAU,IAAI,eAAe;AAAA;AAAA,EAGpC,KAAK,WAAkE;AACrE,UAAM,MAAM,oBAAoB;AAEhC,UAAM,CAAE,UAAU,aAAc,KAAK;AAErC,UAAM;AAAA,MACJ;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,QACX,KAAK;AACT,QAAI,cAAc;AAClB,QAAI,YAAY;AAChB,QAAI,WAAW,GAAG,GAAG,OAAO;AAE5B,UAAM,CAAE,SAAU,KAAK;AACvB,QAAI,OAAO;AACT,UAAI,cAAc,CAAC,QAAQ,CAAE,GAAG,IAAK,YAAY,GAAI,IAAK,KAAK,QAAQ,kBAAkB,KAAK;AAAA;AAAA;AAAA;;;ACxD7F,wBACL,WACA,YACA;AACA,QAAM,kBAAkB,MAAM,QAAQ,cAAc,aAAa,CAAC;AAElE,kBAAgB,QAAQ,CAAC,QAAQ;AAE/B,UAAM,QAAQ,eAAe,gBACzB,IAAI,QACH,oBAAoB,OAAO,IAAI,UAAU,QAAQ;AAGtD,UAAM,MAAM,eAAe,gBACvB,IAAI,MACH,oBAAoB,OAAO,IAAI,UAAU,MAAM,IAAI,IAAI;AAE5D,UAAM,QAAQ,QAAQ,GAAG,MAAM,WAAW;AAC1C,QAAI,QAAQ,KAAK,CAAE,QAAS,KAAK;AAAA;AAAA;;;ACxB9B,uBAAuB,OAAsD;AAClF,QAAM,CAAE,OAAO,SAAU,IAAI;AAE7B,SAAQ,iBAAiB,SAAS,MAAM,YAClC,iBAAiB,SAAS,MAAM,cAAc;AAAA;;;ACH/C,0BAA0B,OAAgE;AAE/F,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,QAAI,iBAAiB,IAAI,SAAS,UAAU,cAAc;AAAQ,aAAO,QAAQ;AAEjF,qBAAiB,GAAU;AACzB,UAAI,CAAC,EAAE;AAAe;AAEtB,QAAE,cAAc,oBAAoB,QAAQ;AAC5C,QAAE,cAAc,oBAAoB,SAAS;AAC7C,aAAO;AAAA;AAGT,oBAAgB,GAAU;AACxB,UAAI,CAAC,EAAE;AAAe;AACtB,QAAE,cAAc,oBAAoB,QAAQ;AAC5C,QAAE,cAAc,oBAAoB,SAAS;AAC7C,cAAQ;AAAA;AAGV,UAAM,iBAAiB,QAAQ;AAC/B,UAAM,iBAAiB,SAAS;AAAA;AAAA;;;ACtB7B,uBAAuB,KAAsC;AAClE,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,QAAI,CAAE,gBAAe;AAAO,aAAO,IAAI,MAAM;AAC7C,UAAM,SAAS,IAAI;AACnB,WAAO,SAAS,MAAM;AACpB,UAAI,OAAO,OAAO,WAAW;AAAU,eAAO,IAAI,MAAM;AACxD,YAAM,MAAM,IAAI,SAAS;AACzB,UAAI,SAAS,MAAM,QAAQ;AAC3B,UAAI,UAAU;AACd,UAAI,MAAM,OAAO;AAAA;AAEnB,WAAO,UAAU;AACjB,WAAO,cAAc;AAAA;AAAA;;;ACXlB,4BAA4B,OAA0F;AAC3H,QAAM,CAAE,OAAO,SAAU,IAAI;AAE7B,MAAI,iBAAiB,OAAO;AAC1B,WAAO,IAAI,WAAW,MAAM,cAAc,MAAM;AAAA;AAElD,MAAI,iBAAiB,OAAO;AAC1B,WAAO,IAAI,WAAW,MAAM,YAAY,MAAM;AAAA;AAEhD,SAAO,IAAI,WAAW,MAAM,OAAO,MAAM;AAAA;;;ACNpC,sBAAsB,CAAE,OAAO,SAA0C;AAC9E,QAAM,CAAE,uBAAwB,IAAI;AACpC,QAAM,SAAS;AACf,SAAO,QAAQ;AACf,SAAO,SAAS;AAChB,SAAO;AAAA;AAGF,+BAA+B,OAAwD,MAAuC;AACnI,QAAM,CAAE,yBAAc,IAAI;AAE1B,MAAI,CAAE,kBAAiB,eAAc,CAAC,cAAc,QAAQ;AAC1D,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,CAAE,OAAO,UAAW,QAAQ,mBAAmB;AACrD,QAAM,SAAS,aAAa,CAAE,OAAO;AAErC,MAAI,iBAAiB,YAAW;AAC9B,wBAAoB,QAAQ,aAAa,OAAO,GAAG;AAAA,SAC9C;AACL,wBAAoB,QAAQ,UAAU,OAAO,GAAG,GAAG,OAAO;AAAA;AAE5D,SAAO;AAAA;;;ACxBT,mCACE,WACA,QAC4B;AAC5B,QAAM,eAAe,UAAU,IAAI,SAAS;AAE5C,QAAM,CAAC,QAAQ,OAAO,eAAe,UAAU,MAAM,MAAM,WAAW,aAAa,IAAI;AACvF,QAAM,cAAc,AAAG,sBAAK,MAAM,UAAU,KAAK,QAAQ,OAAO,aAAa;AAC7E,QAAM,AAAG,yBAAQ,SAAS,aAAa;AAEvC,cAAY;AAEZ,SAAO;AAAA;;;ACfF,wBAAwB,OAAY;AACzC,QAAM,CAAE,OAAO,QAAQ,SAAU,IAAI;AAErC,SAAO,iBAAiB,SACnB,iBAAiB,UACjB,iBAAiB;AAAA;;;ACFjB,uBAAuB,OAA6C,WAAmB,cAAuB,OAAO;AAC1H,QAAM,CAAE,OAAO,UAAW,IAAI;AAE9B,MAAI,CAAE,kBAAiB,SAAS,iBAAiB,SAAS;AACxD,UAAM,IAAI,MAAM;AAAA;AAGlB,MAAI,aAAa;AAAG,WAAO,aAAa,CAAE,OAAO,GAAG,QAAQ;AAC5D,QAAM,OAAO,mBAAmB;AAChC,QAAM,SAAQ,YAAY,KAAK,IAAI,KAAK,QAAQ,KAAK;AACrD,QAAM,QAAQ,SAAQ,KAAK;AAC3B,QAAM,SAAS,SAAQ,KAAK;AAE5B,QAAM,eAAe,aAAa,CAAE,OAAO,WAAW,QAAQ;AAC9D,QAAM,cAAc,iBAAiB,SAAS,QAAQ,sBAAsB;AAE5E,QAAM,SA
AS,KAAK,IAAI,QAAQ,UAAU;AAC1C,QAAM,KAAK,eAAe,QAAQ,SAAS,SAAS;AACpD,QAAM,KAAK,eAAe,SAAS,QAAQ,SAAS;AACpD,MAAI,YAAY,QAAQ,KAAK,YAAY,SAAS;AAAG,wBAAoB,cAAc,UAAU,aAAa,IAAI,IAAI,OAAO;AAE7H,SAAO;AAAA;;;AChBF,qBAAe;AAAA,EAapB,YAAY,QAAkC,oBAA6B,OAAO;AAZ1E,yBAAkD;AAElD,qBAAiC;AAIjC,8BAA8B;AAE9B,4BAA+B;AAKrC,QAAI,CAAC,MAAM,QAAQ,SAAS;AAC1B,YAAM,IAAI,MAAM,4HAA4H;AAAA;AAG9I,SAAK,qBAAqB;AAC1B,SAAK,aAAa,OAAO;AAEzB,WAAO,QAAQ,CAAC,OAAO,QAAQ;AAC7B,UAAI,WAAW,QAAQ;AACrB,aAAK,cAAc,OAAO;AAC1B,aAAK,iBAAiB,OAAO,MAAM;AACnC;AAAA;AAGF,UAAI,WAAW,QAAQ;AACrB,cAAM,YAAa,MAAc,MAAM;AACvC,YAAI,cAAc,GAAG;AACnB,gBAAM,IAAI,MAAM,yCAAyC;AAAA;AAG3D,aAAK,cAAc,OAAO;AAC1B,aAAK,iBAAiB,OAAQ,MAAc,MAAM,MAAM;AACxD;AAAA;AAGF,YAAM,SAAU,iBAAyB,IAAI,SAAS,SAAS,QAAQ,sBAAsB;AAC7F,WAAK,UAAU,OAAO;AACtB,WAAK,iBAAiB,OAAO,CAAC,OAAO,QAAQ,OAAO,OAAO;AAAA;AAAA;AAAA,MAIpD,eAAiD;AAC1D,WAAO,KAAK;AAAA;AAAA,MAGH,WAAgC;AACzC,WAAO,KAAK;AAAA;AAAA,MAGH,eAAwB;AACjC,WAAO,KAAK,YAAY,KAAK,KAAK;AAAA;AAAA,MAGzB,YAAoB;AAC7B,WAAO,KAAK;AAAA;AAAA,MAGH,kBAA8B;AACvC,WAAO,KAAK;AAAA;AAAA,MAGH,YAAgC;AACzC,WAAO,KAAK;AAAA;AAAA,MAGH,0BAAwC;AACjD,WAAO,MAAM,KAAK,WAAW,GAAG,GAAG,IACjC,CAAC,GAAG,aAAa,KAAK,2BAA2B;AAAA;AAAA,EAI9C,SAAS,UAAiE;AAC/E,WAAO,KAAK,SAAS,aAAa,KAAK,aAAa;AAAA;AAAA,EAG/C,mBAAmB,UAA4B;AACpD,WAAO,KAAK,iBAAiB;AAAA;AAAA,EAGxB,eAAe,UAA0B;AAC9C,WAAO,KAAK,iBAAiB,UAAU;AAAA;AAAA,EAGlC,cAAc,UAA0B;AAC7C,WAAO,KAAK,iBAAiB,UAAU;AAAA;AAAA,EAGlC,2BAA2B,UAA8B;AAC9D,QAAI,OAAO,KAAK,cAAc,UAAU;AACtC,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,QAAQ,KAAK,cAAc;AACjC,UAAM,SAAS,KAAK,eAAe;AACnC,WAAO,0BAA0B,CAAE,OAAO,SAAU,KAAK;AAAA;AAAA,EAYpD,cAAc,WAAmB,iBAA0B,MAAmB;AACnF,SAAK,aAAa;AAElB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,eAAe,MAAM,KAAK,WAAW,GAAG,GAAG,IAAI,CAAC,aAAa;AACjE,cAAM,QAAQ,KAAK,SAAS;AAE5B,YAAI,iBAAoB,yBAAQ;AAC9B,cAAI,YAAY,WAAW,SAAS,QAAQ,AAAG,4BAAW;AAC1D,sBAAY,YAAY,WAAW;AAEnC,cAAI,UAAU,MAAM,OAAO,aAAa,UAAU,MAAM,OAAO,WAAW;AACxE,wBAAY,AAAG,uBAAM,eAAe,WAAW,CAAC,WAAW,YAAY,OAAO;AAAA;AAGhF,iBAAO,UAAU,KAAK,WAAW,WAAW;AAAA;AAG9C,YAAI,iBAAiB,IAAI,SAAS,QAAQ;AACxC,iBAAO,AAAG,yBAAQ,WAAW,cAAc,OAAO,WAAW;AAAA;AAG/D,cAAM,IAAI,MAAM,+BAA+B,qGAAqG;AAAA;AAGtJ,YAAM,cAAc,AAAG,uBAAM,aAAa,IAAI,CAAC,MAAM,AAAG,sBAAK,GAAG,aAAa,KAAK,KAAK,WAAW,WAAW,WAAW;AAExH,aAAO;AAAA;AAAA;AAAA;;;ACrIb,0BAAiC,QAAsC;AACrE,MAAI,kBAAkB;AAAU,WAAO;AACvC,QAAM,gBAAgB,MAAM,QAAQ,UAAU,SAAS,CAAC;AACxD,MAAI,CAAC,cAAc;AAAQ,UAAM,IAAI,MAAM;AAC3C,QAAM,aAAa,CAAC,QAAiB,MAAM,QAAQ,UAAU,mBAAmB,SAAS;AACzF,QAAM,aAAa,cAAc,IAAI;AACrC,aAAW,QAAQ,CAAC,OAAO,MAAM;AAC/B,QAAI,CAAC,eAAe,UAAU,CAAC,WAAW,UAAU,CAAC,WAAW,QAAQ;AACtE,UAAI,OAAO,cAAc,OAAO;AAAU,cAAM,IAAI,MAAM,eAAe,WAAW,sEAAsE,cAAc;AACxK,YAAM,IAAI,MAAM,eAAe,WAAW;AAAA;AAE5C,QAAI,WAAW,QAAQ;AAErB,YAAM,YAAY,MAAM,MAAM;AAC9B,UAAI,cAAc;AAAG,cAAM,IAAI,MAAM,eAAe,WAAW,iCAAiC;AAAA;AAAA;AAIpG,QAAM,QAAQ,IAAI,WAAW,IAAI,CAAC,UAAU,eAAe,UAAU,iBAAiB;AACtF,SAAO,IAAI,SAAS,YAAY,MAAM,QAAQ;AAAA;;;ACjBhD,4BAAmC,OAAkB,YAAuE;AAC1H,QAAM,CAAE,UAAW,IAAI;AACvB,MAAI,SAAS;AACb,MAAI,CAAE,kBAAiB,SAAS;AAC9B,UAAM,WAAW,MAAM,WAAW;AAClC,QAAI,SAAS,YAAY;AAAG,YAAM,IAAI,MAAM;AAC5C,UAAM,iBAAiB,SAAS,SAAS;AACzC,aAAS,0BAA0B,SAAS,iBAAiB,MAAM,oBAAoB;AAAA;AAEzF,QAAM,MAAM,oBAAoB;AAChC,QAAM,QAAQ,WACX,IAAI,CAAC,QAAS,eAAe,gBAAgB,IAAI,QAAQ,OAAO,OAAO,OAAO,QAAQ,IAAI,UAAU,KACpG,IAAI,CAAC,QAAQ,IAAI,mBAAmB,OAAO,OAAO,OAAO;AAC5D,SAAO,MAAM,IAAI,CAAC,CAAE,GAAG,GAAG,OAAO,YAAa;AAC5C,UAAM,UAAU,aAAa,CAAE,OAAO;AACtC,QAAI,QAAQ,KAAK,SAAS;AAAG,0BAAoB,SAAS,aAAa,IAAI,aAAa,GAAG,GAAG,OAAO,SAAS,GAAG;AACjH,WAAO;AAAA;AAAA;;;AChBX,kCAAyC,aAAwC,YAAiE;AAChJ,MAAI,CAAC,WAAW,gBAAgB,CAAC,WAAW,cAAc;AACxD,UAAM,IAAI,MAAM;AAAA;AAGlB,MAAI,WAAW,gBAAgB,YAAY,MAAM,KAAK,GAAG;AACvD,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO,AAAG,sBAAK,MA
AM;AACnB,UAAM,CAAC,WAAW,UAAU,eAAe,YAAY,MAAM,MAAM,WAAW,eAAe,IAAI;AAEjG,UAAM,QAAQ,WACX,IAAI,CAAC,QAAS,eAAe,gBAC1B,IAAI,QAAQ,UAAU,WAAW,MACjC,KACH,IAAI,CAAC,QAAQ,IAAI,mBAAmB,UAAU;AAEjD,UAAM,cAAc,MAAM,IAAI,CAAC;AAAA,MAC7B;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,UACT,AAAG,yBAAQ,YAAY,KAAK,WAAW,UAAU,cAAc,CAAC,GAAG,GAAG,IAAI,CAAC,QAAQ,OAAO;AAEhG,WAAO;AAAA;AAAA;;;ACpCX,4BACE,KAEA,MACmB;AACnB,QAAM,CAAE,SAAU,IAAI;AACtB,QAAM,MAAM,MAAM,MAAM,KAAK;AAC7B,MAAI,CAAE,KAAI,SAAS,MAAM;AACvB,UAAM,IAAI,MAAM,qBAAqB,IAAI,WAAW,IAAI,yBAAyB,IAAI;AAAA;AAEvF,SAAO;AAAA;;;ACTT,0BAAiC,KAAwC;AACvE,QAAM,MAAM,MAAM,aAAa;AAC/B,QAAM,OAAO,MAAO,IAAK;AAEzB,MAAI,CAAC,KAAK,KAAK,WAAW,WAAW;AACnC,UAAM,IAAI,MAAM,wEAAwE,KAAK,kBAAkB,IAAI;AAAA;AAErH,SAAO,cAAc;AAAA;;;ACRvB,yBAAmC,KAAyB;AAC1D,SAAQ,OAAM,aAAa,MAAM;AAAA;;;ACDnC,+BAAsC,KAAoC;AACxE,SAAO,IAAI,aAAa,MAAO,OAAM,aAAa,MAAM;AAAA;;;ACHnD,sBAAsB,KAAyB,kBAA0B;AAC9E,QAAM,0BAA0B,GAAG;AAEnC,MAAI,CAAC,KAAK;AACR,WAAO;AAAA,MACL,cAAc;AAAA,MACd,aAAa;AAAA;AAAA;AAIjB,MAAI,QAAQ,KAAK;AACf,WAAO;AAAA,MACL,cAAc;AAAA,MACd,aAAa,IAAI;AAAA;AAAA;AAIrB,QAAM,WAAW,IAAI,WAAW,aAAa,YAAY,IAAI,WAAW,cAAc,aAAa;AACnG,QAAM,IAAI,QAAQ,UAAU;AAE5B,QAAM,QAAQ,IAAI,MAAM,KAAK,OAAO,CAAC,MAAM;AAE3C,QAAM,eAAe,IAAI,SAAS,WAC9B,MAAM,MAAM,SAAS,KACrB;AAEJ,MAAI,eAAe,WAAY,KAAI,SAAS,WAAW,MAAM,MAAM,GAAG,MAAM,SAAS,KAAK,OAAO,KAAK;AACtG,iBAAe,IAAI,WAAW,OAAO,IAAI,iBAAiB;AAE1D,SAAO;AAAA,IACL;AAAA,IACA,aAAa,iBAAiB,MAAM,IAAI,iBAAiB,GAAG,gBAAgB;AAAA;AAAA;;;AC1BhF,6BACE,KACA,kBAC4B;AAC5B,QAAM,CAAE,aAAa,gBAAiB,aAAa,KAAK;AACxD,QAAM,WAAW,MAAM,UAAuC;AAE9D,SAAO,AAAG,oBAAG,YAAY,UAAU;AAAA;;;ACT9B,yBAAyB,OAAoB,WAAwB,qBAA8B,OAAO;AAC/G,QAAM,CAAE,OAAO,UAAW,qBACtB,mBAAmB,aACnB;AACJ,QAAM,QAAQ;AACd,QAAM,SAAS;AACf,SAAO,CAAE,OAAO;AAAA;;;ACFX,0BAAyC;AAAA,EAC9C,YAAY,MAAc;AAIhB,mBAAkC;AAElC,0BAAiC;AALzC,SAAK,QAAQ;AAAA;AAAA,MASJ,SAAiC;AAAE,WAAO,KAAK;AAAA;AAAA,MAE/C,gBAAgC;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9C,WAAoB;AAAE,WAAO,CAAC,CAAC,KAAK;AAAA;AAAA,EAExC,iBAAiB,WAA8B;AACpD,UAAM,CAAE,KAAK,WAAY,KAAK,qBAAqB;AACnD,WAAO,IAAI;AAAA;AAAA,EAGN,sBAAsB,WAAmB,SAAmB;AACjE,UAAM,CAAE,KAAK,WAAY,KAAK,qBAAqB;AACnD,QAAI,SAAS;AACb,QAAI,WAAW;AAAA;AAAA,EAGV,eAAe;AACpB,WAAO,KAAK,eAAe,IAAI,CAAC,CAAE,eAAiB;AAAA,MACjD,MAAM;AAAA,MACN,QAAQ,KAAK,iBAAiB;AAAA;AAAA;AAAA,EAI3B,qBAAqB;AAC1B,WAAO,KAAK,eAAe,OAAO,CAAC,UAAU,MAAM,kBAAqB;AAAA;AAAA,EAGnE,kBAAkB;AACvB,WAAO,KAAK,eAAe,OAAO,CAAC,UAAU,CAAE,OAAM,kBAAqB;AAAA;AAAA,EAGrE,WAAW;AAChB,SAAK,kBAAkB,QAAQ,CAAC,CAAE,MAAM,qBAAa;AACnD,WAAK,sBAAsB,MAAM,QAAO;AAAA;AAAA;AAAA,EAIrC,SAAS;AACd,SAAK,qBAAqB,QAAQ,CAAC,CAAE,MAAM,QAAQ,cAAe;AAChE,YAAM,UAAS,AAAG,wBAAO,SAAS;AAClC,eAAS;AACT,WAAK,sBAAsB,MAAM;AAAA;AAAA;AAAA,EAI9B,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,eAAe,QAAQ,CAAC,UAAU;AACrC,UAAI,oBAAoB,MAAM,OAAO,YAAY;AAC/C,cAAM,IAAI,MAAM,mDAAmD,MAAM;AAAA;AAE3E,YAAM,OAAO;AAAA;AAEf,SAAK,UAAU;AAAA;AAAA,EAGV,kBAAgC;AACrC,WAAO,IAAI,aACT,KAAK,eACF,IAAI,CAAC,CAAE,qBAAa,MAAM,KAAK,QAAO,aACtC,OAAO,CAAC,MAAM,QAAQ,KAAK,OAAO;AAAA;AAAA,QAI5B,KAAK,cAAgE;AAChF,QAAI,wBAAwB,cAAc;AACxC,WAAK,eAAe;AACpB;AAAA;AAEF,UAAM,KAAK,YAAY;AAAA;AAAA,QAGZ,YAAY,KAAyB;AAChD,QAAI,OAAO,OAAO,QAAQ,UAAU;AAClC,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAE1B,UAAM,YAAY,MAAM,cAAc,KAAK,KAAK;AAChD,SAAK,kBAAkB;AAAA;AAAA,QAGZ,aAAa,UAA8B;AACtD,QAAI,YAAY,OAAO,aAAa,UAAU;AAC5C,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAE1B,UAAM,CAAE,YAAa,IAAI;AACzB,UAAM,CAAE,aAAa,gBAAiB,aAAa,UAAU,KAAK;AAClE,UAAM,uBAAuB,CAAC,cAAwB,QAAQ,IAAI,UAAU,IAAI,CAAC,OAAO,SAAS,IAAI,KAAK,CAAC,QAAQ,IAAI;AACvH,UAAM,cAAc,AAAG,oBAAG,qBAAqB;AAC/C,UAAM,WAAW,KAAK,MAAO,OAAM,SAAS,cAAc;AAC1D,UAAM,YAAY,MAAM,YAAY,UAAU;AAC9C,SAAK,kBAAkB;AAAA;AAAA,EAGlB,kBAAkB,WAA8B;AACrD,UAAM,CAAE,eAAe,UAAW,KAAK,2BAA2B;AAClE,SAAK,iBAAi
B;AACtB,SAAK,UAAU;AAAA;AAAA,EAGV,eAAe,SAAuB;AAC3C,UAAM,CAAE,eAAe,UAAW,KAAK,cAAc;AACrD,SAAK,iBAAiB;AACtB,SAAK,UAAU;AAAA;AAAA,EAGT,qBAAqB,WAAmB;AAC9C,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,SAAS,UAAU,MAAM,KAAK,OAAO,CAAC,KAAoD,aAAY;AAE1G,UAAI,CAAC,IAAI,QAAQ,eAAe,WAAU;AACxC,cAAM,IAAI,MAAM,wDAAwD,sBAAqB;AAAA;AAE/F,aAAO,CAAE,KAAK,IAAI,SAAS,mBAAS,SAAS,IAAI,QAAQ;AAAA,OACxD,CAAE,SAAS,KAAK;AAEnB,UAAM,CAAE,KAAK,WAAY;AACzB,QAAI,CAAC,OAAO,CAAC,WAAW,CAAE,KAAI,oBAAuB,0BAAS;AAC5D,YAAM,IAAI,MAAM,8DAA8D;AAAA;AAGhF,WAAO,CAAE,KAAK;AAAA;AAAA;;;ACzIX,gCACL,GACA,QACA,QACa;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,iCAAgB,GAAG,OAAO,kBAAkB,OAAO,kBAAkB,QAAQ;AAC1F,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO;AAAA;AAAA;;;ACNJ,qBACL,GACA,kBACA,eAAwB,OACX;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,OAAO,AAAG,sBACd,eACI,AAAG,qBACH,AAAG,wBAAO,GAAI,iBAAiB,MAAqB,SAAS,CAAC,GAAG,IAAI,SACrE,iBAAiB,MAAM,QAEvB,uBAAuB,GAAG,iBAAiB,OAA8B,CAAC,GAAG;AAEnF,UAAM,OAAO,uBAAuB,MAAM,iBAAiB,OAAO,CAAC,GAAG;AAEtE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM;AACjC,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,WAAO,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAAA;AAAA;AAItC,qBACL,GACA,kBACA,eAAwB,OACxB,cAAuB,MACV;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,OAAO,AAAG,sBACd,eACI,AAAG,qBACH,AAAG,wBAAO,GAAI,iBAAiB,MAAqB,SAAS,cAAc,CAAC,GAAG,KAAK,CAAC,GAAG,IAAI,SAC5F,iBAAiB,MAAM,QAEvB,uBAAuB,GAAG,iBAAiB,OAA8B,cAAc,CAAC,GAAG,KAAK,CAAC,GAAG;AAE1G,UAAM,OAAO,uBAAuB,MAAM,iBAAiB,OAAO,CAAC,GAAG;AAEtE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM;AACjC,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAC9C,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,WAAO,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAAA;AAAA;;;AChDnD,mBACL,GACA,QACA,UAA4B,QAC5B,WAAoB,OACP;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,MAAM,AAAG,qBACb,AAAG,wBAAO,GAAG,OAAO,SAAS,CAAC,GAAG,IAAI,UACrC,OAAO;AAGT,WAAO,WAAW,AAAG,sBAAK,OAAO;AAAA;AAAA;;;ACd9B,oCAAoC,WAAgB,eAA+B;AACxF,SAAO,KAAK,WAAW,QAAQ,CAAC,SAAS;AACvC,QAAI,CAAC,cAAc,KAAK,CAAC,OAAO,GAAG,iBAAiB,OAAO;AACzD,gBAAU,MAAM;AAAA;AAAA;AAAA;;;ACDf,kCACL,gBACA,eACA;AACA,SAAO,CACL,YACA,aACA,YACA,iBACe;AACf,UAAM,UAAU,AAAG,0BACjB,eAAe,aAAa,cAAc,aAAa,aACvD,CAAC,YAAY,YAAY,YAAY;AAEvC,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG;AAGlB,WAAO,CAAE,SAAS;AAAA;AAAA;;;ACrBf,gCACL,gBACA,eACA;AACA,SAAO,CACL,YACA,aACA,iBACa;AACb,UAAM,aAAa,AAAG,0BAAS,eAAe,aAAa,cAAc,CAAC,YAAY;AACtF,UAAM,UAAU,AAAG,0BAAS,eAAe;AAE3C,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL,SAAS;AAAA,MACT,MAAM;AAAA;AAAA;AAAA;;;ACHL,gCAA0B;AAAA,EAE/B,YAES,kBAEA,kBAEA,MAEP;AANO;AAEA;AAEA;AAAA;AAAA;;;ACxBJ,2CACL,gBACA,eACA;AACA,SAAO,CAAC,YAAoB,aAAqB,iBAA8C;AAC7F,UAAM,mBAAmB,AAAG,0BAAS,eAAe,IAAI,IAAI,aAAa,CAAC,GAAG,GAAG,YAAY;AAC5F,UAAM,mBAAmB,AAAG,0BAAS,eAAe,aAAa,cAAc,CAAC,GAAG,GAAG,YAAY;AAClG,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,CAAE,WAAW,GAAG,kCAChB,CAAE,WAAW,GAAG,kCAChB,CAAE,WAAW,GAAG;AAGlB,WAAO,IAAI,oBACT,kBACA,kBACA;AAAA;AAAA;AAKC,wCAEL,oBACA;AACA,SAAO,CAAC,WAAwC;AAC9C,UAAM,mBAAmB,mBAAgC,GAAG,2BAA2B;AACvF,UAAM,mBAAmB,mBAAgC,GAAG,2BAA2B;AACvF,UAAM,OAAO,mBAAgC,GAAG,eAAe;AAE/D,WAAO,IAAI,oBACT,kBACA,kBACA;AAAA;AAAA;;;ACpCC,mCAAmC,WAAgB,eAA+B;AACvF,SAAO,CAAC,cAAsB,WAAmB,eAAwB;AACvE,UAAM,UAAS,UAAU;AAEzB,QAAI,CAAC,SAAS,SAAQ,YAAY;AAChC,YAAM,IAAI,MAAM,sBAAsB,+BAA+B,4BAA4B;AAAA;AAGnG,kBAAc,KACZ,CAAE,cAAc,WAAW,cAAc;AAG3C,WAAO;AAAA;AAAA;;;ACfJ,+BAA+B,SAAuB;AAC3D,MAAI,mBAAmB;AAEvB,0BAAwB,YAAkC;AACxD,UAAM,MAAM,iBAAiB,MAAM,GAAG;AACtC,uBAAmB,iBAAiB,MAAM;AAC1C,WAAO;AAAA;AAGT,iCAA6C;AAC3C,WAAO;AAAA;AAGT
,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;ACZG,2BAA2B,gBAAwC,eAA+B;AACvG,QAAM,oBAAoB,yBAAyB,gBAAgB;AACnE,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,oCAAkC,YAAoB,aAAqB,cAAsB,eAAwB,OAA0B;AACjJ,UAAM,QAAQ,eACV,kBAAkB,YAAY,aAAa,GAAG,GAAG,wBACjD,2BAA2B,YAAY,aAAa,GAAG;AAC3D,UAAM,QAAQ,2BAA2B,aAAa,aAAa,GAAG;AACtE,UAAM,SAAQ,2BAA2B,aAAa,aAAa,GAAG;AAEtE,WAAO,CAAE,OAAO,OAAO;AAAA;AAGzB,oCAAkC,YAAoB,aAAqB,cAAsB,eAAwB,OAA0B;AACjJ,UAAM,CAAE,OAAO,OAAO,iBAAU,yBAAyB,YAAY,aAAa,cAAc;AAChG,UAAM,QAAQ,2BAA2B,aAAa,aAAa,GAAG;AAEtE,WAAO;AAAA,MACL;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAIzB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;ACxBG,uBAAuB,SAA8F;AAC1H,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,gBAAgB;AAEtC,QAAM,SAAS,yBAAyB,GAAG,IAAI,UAAU;AACzD,QAAM,SAAS,yBAAyB,IAAI,IAAI;AAChD,QAAM,SAAS,yBAAyB,IAAI,KAAK;AACjD,QAAM,SAAS,yBAAyB,KAAK,KAAK;AAElD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ;AAAA,MACN;AAAA,MAAQ;AAAA,MAAQ;AAAA,MAAQ;AAAA;AAAA;AAAA;;;ACvBvB,+BAA+B,oBAAuE;AAC3G,SAAO,CAAC,WAA+B;AACrC,UAAM,UAAU,mBAAgC,GAAG,kBAAkB;AACrE,UAAM,OAAO,mBAAgC,GAAG,eAAe;AAE/D,WAAO,CAAE,SAAS;AAAA;AAAA;;;ACNf,2BAA2B,WAAgB,eAA+B;AAC/E,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,QAAM,oBAAoB,sBAAsB;AAChD,QAAM,6BAA6B,+BAA+B;AAElE,oCAAkC,QAAgB,eAAwB,OAA0B;AAClG,UAAM,QAAQ,eACV,kBAAkB,GAAG,kBACrB,2BAA2B,GAAG;AAClC,UAAM,QAAQ,2BAA2B,GAAG;AAC5C,UAAM,SAAQ,2BAA2B,GAAG;AAE5C,WAAO,CAAE,OAAO,OAAO;AAAA;AAGzB,oCAAkC,QAAgB,eAAwB,OAA0B;AAClG,UAAM,QAAQ,eACV,kBAAkB,GAAG,kBACrB,2BAA2B,GAAG;AAClC,UAAM,QAAQ,2BAA2B,GAAG;AAC5C,UAAM,SAAQ,2BAA2B,GAAG;AAC5C,UAAM,QAAQ,2BAA2B,GAAG;AAE5C,WAAO;AAAA,MACL;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAIzB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;AC7BG,oCACL,WACuE;AACvE,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,WAAW;AAEjC,QAAM,SAAS;AAAA,IACb,QAAQ,yBAAyB,UAAU;AAAA,IAC3C,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA;AAGnC,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ;AAAA;;;ACdZ,yCAAmC,cAAuG;AAAA,EAC/I,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,YAAY,YAAY,OAAO,QAAQ;AACjD,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AAEtC,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,2BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,cAAc;AAAA;AAAA;;;AC9ClB,6BACL,GACA,QACa;AACb,SAAO,AAAG,sBAAK,MAAM,AAAG,qBACtB,AAAG,wBAAO,GAAG,OAAO,UACpB,OAAO;AAAA;;;ACPJ,wBAAuB,SAAuB,YAAoB,aAA2E;AAClJ,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,kBAAkB,uBAAuB,gBAAgB;AAE/D,QAAM,KAAK,gBAAgB,YAAY,aAAa;AAEpD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,CAAE;AAAA;AAAA;;;AChBP,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,2BAAyB,QAA0B;AACjD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,CAAE,SAAS;AAAA;AAGpB,QAAM,SAAS;AAAA,IACb,IAAI,gBAAgB;AAAA;AAGtB,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ;AAAA;;;ACtBZ,4BAA4B,WAA8B;AAC/D,QAAM,sBAAyC;AAC/C,QAAM,gBAAmC;AAEzC,SAAO,KAAK,WAAW,QAAQ,CAAC,QAAQ;AACtC,UAAM,MAAM,IAAI,WAAW,QAAQ,gBAAgB;AACnD,QAAI,OAAO,UAAU;AAAA;AAGvB,SAAO,CAAE,qBAAqB;AAAA;;;ACAzB,kCAGG,cAAyB;AAAA,EAGjC,YAAY,OAAe,sBAA+D;AACxF,UAAM;AACN,SAAK,wBAAwB;AAAA;AAAA,MAGpB,uBAAgE;AACzE,WAAO,KAA
K;AAAA;AAAA,EASP,OAAO,OAA4C;AACxD,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,qBAAqB,iBAAiB,WACxC,KAAK,qBAAqB,aAAa,SACvC;AACJ,aAAO,oBAAoB,mBAAmB,KAAK,mBAAmB,MAAM,IAAI,KAAK,OAAO;AAAA;AAAA;AAAA,EAIzF,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,qBAAqB,QAAQ;AAClC,UAAM,QAAQ;AAAA;AAAA,EAGT,qBAAqB,SAAuB;AACjD,UAAM,CAAE,QAAQ,iBAAkB,KAAK,wBAAwB;AAC/D,SAAK,UAAU;AACf,SAAK,iBAAiB;AAAA;AAAA,EAGjB,wBAAwB,SAAuB;AACpD,WAAO,eAAc,SAAS,KAAK,2BAA2B,KAAK;AAAA;AAAA,EAG3D,2BAA2B,WAA8B;AACjE,UAAM,CAAE,qBAAqB,iBAAkB,mBAAmB;AAElE,SAAK,qBAAqB,kBAAkB;AAE5C,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,UAAM,MAAM,KAAK;AACjB,UAAM,OAAO,KAAK;AAClB,UAAM,uBAAwB,OAAO,MAAO;AAE5C,UAAM,0BAA0B,QAAQ,MAAM,GAAG,QAAQ,SAAS;AAClE,UAAM,oBAAoB,QAAQ,MAAM,QAAQ,SAAS;AAEzD,SAAK,qBAAqB,eAAe;AACzC,WAAO,KAAK,wBAAwB;AAAA;AAAA;;;AC/EjC,IAAM,yBAAyB,CAAC,WAAW,SAAS,OAAO,SAAS,WAAW,aAAa;AAE5F,4BAAsB;AAAA,EAe3B,YAAY,eAAwC;AAClD,QAAI,cAAc,WAAW,GAAG;AAC9B,YAAM,IAAI,MAAM,8EAA8E,cAAc;AAAA;AAG9G,2BAAuB,QAAQ,CAAC,YAAY,QAAQ;AAClD,WAAK,cAAc,cAAc;AAAA;AAAA;AAAA,EAIrC,gBAAgB;AACd,WAAO,uBACJ,IAAI,CAAC,eAAgB,EAAE,YAAY,aAAa,KAAK,eACrD,KAAK,CAAC,IAAI,OAAO,GAAG,cAAc,GAAG;AAAA;AAAA;;;ACtBrC,sCAAgC,cAA0C;AAAA,EAC/E,YAAY,uBAA6C,IAAI,wBAAwB;AACnF,UAAM,qBAAqB;AAAA;AAAA,EAGtB,aAAa,OAA4C;AAC9D,WAAO,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,OAAO;AAAA;AAAA,QAGjC,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,mBAAmB,OAAkB;AAChD,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa;AACpC,UAAM,sBAAsB,MAAM,QAAQ,IAAI,AAAG,yBAAQ,KAAK,IAAI,OAAO,MAAM;AAC7E,YAAM,OAAO,EAAE;AACf,QAAE;AACF,aAAO;AAAA;AAET,QAAI;AAEJ,UAAM,qBAAqB,oBACxB,IAAI,CAAC,iBAAiB,IAAI,gBAAgB;AAE7C,WAAO,SAAS,eACZ,qBACA,mBAAmB;AAAA;AAAA,EAGf,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA,EAGC,2BAAmC;AAC3C,WAAO;AAAA;AAAA;;;AC5CJ,+BAA+B,KAA0C;AAC9E,SAAO,IAAI,uBAAuB;AAAA;AAG7B,mCAA4C,WAAoB,aAA4D;AACjI,QAAM,YAAY,CAAE;AACpB,SAAO,IAAK,cAAc;AAAA;;;ACDrB,6BACL,WACA,iBACA,gBAAgB,KAChB,iBACA;AACA,QAAM,uBAAuB,MAAM,QAAQ,mBAAmB,kBAAkB,CAAC;AAEjF,uBAAqB,QAAQ,CAAC,MAAM;AAElC,UAAM,OAAO,aAAa,kBACtB,IACC,sBAAsB,KAAK,EAAE,cAAc;AAChD,QAAI,CAAC,MAAM;AACT,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,SAAS,KAAK;AACpB,UAAM,mBAAmB,OAAO,OAAO,CAAC,cAAc,UAAU,cAAc;AAE9E,UAAM,SAAS,oBAAoB,KAC/B,EAAE,UAAU,IAAI,aACf,mBAAmB,IAAI,MAAM,GAAG;AAErC,UAAM,gBAAgB,IAAI,cACxB,iBAAiB,IAAI,CAAC,cAAc,GAAG,UAAU,eAAe,MAAM,UAAU,kBAChF;AAEF,kBAAc,KAAK;AAAA;AAAA;;;ACvBhB,6BAA6B,KAA0E;AAC5G,SAAO,oBAAoB,QAEtB,IAAI,wBAAwB,iBAE5B,IAAI,iCAAiC,iBAErC,IAAI,0BAA0B;AAAA;AAGrC,4BAA4B,MAAM;AAEhC,QAAM,UAAU,CAAC,IAAI,IAAI,IAAI,OAAQ,KAAK,MAAM,KAAK,IAAI,KAAK,MAAM,KAAK;AAGzE,QAAM,UAAU,CAAC,UAAW,QAAQ,MAAO,KAAK;AAEhD,QAAM,QAAQ,CAAE,MAA0B,QAAW,OAA2B,QAAW,KAAyB;AAEpH,MAAI,CAAC,QAAQ,CAAC,KAAK,cAAc,KAAK,WAAW,WAAW;AAAI,WAAO;AACvE,QAAM,KAAK,KAAK;AAOhB,QAAM,OAAO,CAAC,QAAQ,GAAG,IAAI,IAAI,GAAG,IAAI,IAAI,GAAG,IAAI,IAAI,GAAG,IAAI;AAK9D,QAAM,QAAQ,QAAQ,GAAG,KAAK,IAAI,GAAG,GAAG,KAAK,GAAG,IAAI,MAAM,GAAG,IAAI,IAAI,KAAK,IAAI,KAAK,IAAI,GAAG,IAAI,KAAK,GAAG,IAAI,MAAM,GAAG,IAAI;AAMvH,QAAM,SAAS,GAAG,OAAO,CAAC,MAAM,QAAS,OAAO,IAAI,KAAK,OAAO,IAAI,IAAK;AACzE,QAAM,MAAM,GAAG,OAAO,CAAC,MAAM,QAAS,OAAO,IAAI,KAAK,OAAO,IAAI,IAAK;AACtE,QAAM,MAAM,KAAK,KAAM,MAAK,SAAS,UAAW,OAAM,UAAU,MAAO;AAEvE,SAAO;AAAA;AAGF,iCAEoD,WAAoB,oBAAgF;AAC7J,QAAM,CAAE,KAAK,SAAU,UAAU;AACjC,QAAM,YAAY,mBAAmB,QAAwB,MAAM,GAAG,MAAM;AAE5E,QAAM,OAAO,UAAU;AACvB,QAAM,CAAE,aAAc,UAAU;AAChC,QAAM,cAAc,IAAI,cAAc,UAAU,UAAU,OAAO,KAAK,QAAQ,UAAU,YAAY;AACpG,QAAM,QAAQ,mBAAmB;AAEjC,QAAM,YAAY;AAAA,IAChB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,SAAO,IAAK,cAAc;AAAA;;;AC3DrB,qCAA+B;AAAA,EAapC,YAAY,UAAqC,IAAI;AACnD,UAAM;AAAA,MACJ,Y
AAY;AAAA,MAAM,aAAa;AAAA,MAAM;AAAA,MAAW;AAAA,MAAW;AAAA,MAAW;AAAA,QACpE;AACJ,SAAK,YAAY;AACjB,SAAK,aAAa;AAClB,SAAK,YAAY,aAAa;AAC9B,SAAK,YAAY,aAAa;AAC9B,SAAK,YAAY,aAAa;AAC9B,SAAK,aAAa,cAAc;AAAA;AAAA;AAI7B,8BAAwB;AAAA,EAK7B,YACE,eACA,UAAqC,IACrC;AACA,SAAK,gBAAgB;AACrB,SAAK,UAAU,IAAI,yBAAyB;AAAA;AAAA,EAG9C,KAAK,WAAkE;AACrE,UAAM,MAAM,oBAAoB;AAEhC,UAAM;AAAA,MACJ;AAAA,MAAW;AAAA,MAAY;AAAA,MAAW;AAAA,MAAW;AAAA,MAAW;AAAA,QACtD,KAAK;AAET,QAAI,aAAa,KAAK,yBAAyB,iBAAiB;AAC9D,UAAI,cAAc;AAClB,UAAI,YAAY;AAChB,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc,cAAc;AAClD,kBAAY,KAAK,KAAK,cAAc,eAAe;AACnD,kBAAY,KAAK,KAAK,cAAc,YAAY;AAAA;AAGlD,QAAI,YAAY;AACd,UAAI,cAAc;AAClB,UAAI,YAAY;AAEhB,YAAM,YAAY,CAAC,OAAe;AAChC,YAAI;AACJ,YAAI,IAAI,GAAG,GAAG,GAAG,GAAG,WAAW,GAAG,IAAI,KAAK;AAC3C,YAAI;AAAA;AAEN,WAAK,cAAc,UAAU,QAAQ;AAAA;AAAA;AAAA;AAOpC,2BACL,WACA,eACA;AACA,QAAM,qBAAqB,MAAM,QAAQ,iBAAiB,gBAAgB,CAAC;AAC3E,qBAAmB,QAAQ,CAAC,MAAM;AAEhC,UAAM,YAAY,aAAa,gBAC3B,IACC,oBAAoB,KAAK,EAAE,YAAY;AAC5C,QAAI,CAAC,WAAW;AACd,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,kBAAkB,WAAW,KAAK;AAAA;AAAA;;;;;;ACrG1C,4BAA2B,gBAAwC,eAA+B;AAChG,QAAM,oBAAoB,yBAAyB,gBAAgB;AACnE,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,uCAAqC,YAAoB,aAAqB,cAA4C;AACxH,UAAM,kBAAkB,2BAA2B,YAAY,aAAa,GAAG;AAC/E,UAAM,kBAAkB,2BAA2B,aAAa,aAAa,GAAG;AAChF,UAAM,iBAAiB,kBAAkB,YAAY,aAAa,GAAG,GAAG;AAExE,WAAO,CAAE,iBAAiB,iBAAiB;AAAA;AAG7C,kCAAgC,UAAkB,cAAuC;AACvF,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAC1E,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAC1E,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAE1E,WAAO,CAAE,iBAAiB,iBAAiB;AAAA;AAG7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAAuB,eAAsF;AACzI,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AAEtC,QAAM,qBAAqB,kBAAkB,GAAG,IAAI,GAAG;AACvD,QAAM,+BAA+B,4BAA4B,IAAI,IAAI;AACzE,QAAM,+BAA+B,4BAA4B,IAAI,KAAK;AAE1E,QAAM,aAAa;AAAA,IACjB,SAAS;AAAA,IACT,mBAAmB;AAAA,IACnB,mBAAmB;AAAA;AAGrB,QAAM,cAAc;AACpB,QAAM,eAAe,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAC1C,gBAAY,cAAc,SAAS,uBAAuB,KAAK,0BAA0B;AAAA;AAG3F,QAAM,4BAA4B,4BAA4B,KAAK,KAAK;AACxE,QAAM,2BAA2B,2BAA2B,KAAK,KAAK;AAEtE,QAAM,YAAY;AAAA,IAChB,iBAAiB;AAAA,IACjB,gBAAgB;AAAA;AAGlB,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,CAAE,YAAY,aAAa;AAAA;AAAA;;;ACtEvC,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,QAAM,oBAAoB,sBAAsB;AAChD,QAAM,6BAA6B,+BAA+B;AAElE,uCAAqC,cAA4C;AAC/E,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,iBAAiB,kBAAkB,GAAG;AAE5C,WAAO,CAAE,iBAAiB,iBAAiB;AAAA;AAG7C,kCAAgC,cAAuC;AACrE,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AAEtD,WAAO,CAAE,iBAAiB,iBAAiB;AAAA;AAG7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACA,eAC+D;AAC/D,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,QAAM,qBAAqB,kBAAkB;AAC7C,QAAM,+BAA+B,4BAA4B;AACjE,QAAM,+BAA+B,4BAA4B;AAEjE,QAAM,aAAa;AAAA,IACjB,SAAS;AAAA,IACT,mBAAmB;AAAA,IACnB,mBAAmB;AAAA;AAGrB,QAAM,cAAc;AACpB,QAAM,eAAe,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAC1C,gBAAY,cAAc,SAAS,uBAAuB,0BAA0B;AAAA;AAGtF,QAAM,4BAA4B,4BAA4B;AAC9D,QAAM,2BAA2B,2BAA2B;AAE5D,QAAM,YAAY;AAAA,IAChB,iBAAiB;AAAA,IACjB,gBAAgB;AAAA;AAGlB,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ,CAAE,YAAY,aAAa,YAAa;AAAA;;;AChE3D,cAAc,GAAgB,QAAoB,QAAuC;AACvF,SAAO,AAAG,qBAAI,AAAG,wBAAO,GAAG,OAAO,SAAS,QAAQ,SAAS,OAAO;AAAA;AAGrE,wBAAwB,GAAgB,QAA8B,kBAA2B,MAAmB;AAClH,MAAI,MAAM,kBAAkB,AAAG,sBAAK,KAAK;AACzC,QAAM,uBAAuB,KAAK,OAAO,iB
AAiB,CAAC,GAAG;AAC9D,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,QAAM,AAAG,qBAAI,KAAK,KAAK,GAAG,OAAO,gBAAgB,CAAC,GAAG;AACrD,SAAO;AAAA;AAGT,mBAAmB,GAAgB,QAAsC;AACvE,MAAI,MAAM,uBAAuB,AAAG,sBAAK,IAAI,OAAO,iBAAiB,CAAC,GAAG;AACzE,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,AAAG,qBAAI,KAAK;AAClB,SAAO;AAAA;AAGF,iCAA2B,cAAkC;AAAA,EAGlE,YAAY,eAAuB;AACjC,UAAM;AACN,SAAK,iBAAiB;AAAA;AAAA,EAGjB,aAAa,OAA8B;AAChD,UAAM,CAAE,UAAW;AACnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAElB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AACvD,UAAI,MAAM,AAAG,sBAAK,KAAK,YAAY,OAAO,WAAW,SAAS,CAAC,GAAG;AAClE,YAAM,eAAe,KAAK,OAAO,WAAW,mBAAmB;AAC/D,YAAM,eAAe,KAAK,OAAO,WAAW;AAC5C,YAAM,KAAK,gBAAgB,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAChD,cAAM,UAAU,KAAK,OAAO,YAAY,cAAc;AAAA;AAExD,YAAM,eAAe,KAAK,OAAO,UAAU;AAC3C,YAAM,AAAG,sBAAK,uBAAuB,KAAK,OAAO,UAAU,gBAAgB,CAAC,GAAG;AAC/E,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B,WAAW,KAAK;AAAA;AAAA,EAG1C,cAAc,SAAuB;AAC7C,WAAO,eAAc,SAAS,KAAK;AAAA;AAAA;;;ACvEhC,wBAAuB,SAA6E;AACzG,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,kBAAkB,uBAAuB,gBAAgB;AAE/D,QAAM,MAAM,gBAAgB,KAAK,GAAG;AACpC,QAAM,SAAS,gBAAgB,KAAK,GAAG;AAEvC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,CAAE,IAAI,CAAE,KAAK;AAAA;AAAA;;;ACjBlB,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,2BAAyB,QAA0B;AACjD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,CAAE,SAAS;AAAA;AAGpB,QAAM,SAAS;AAAA,IACb,IAAI;AAAA,MACF,KAAK,gBAAgB;AAAA,MACrB,QAAQ,gBAAgB;AAAA;AAAA;AAI5B,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ;AAAA;;;ACtBZ,IAAK;AAAL,UAAK,SAAL;AAEL,sBAAS;AAET,oBAAO;AAAA,GAJG;;;ACML,iCAA2B,cAAyB;AAAA,EAGzD,YAAY,uBAAqC,IAAI,aAAa,IAAI;AACpE,UAAM;AACN,SAAK,wBAAwB;AAAA;AAAA,MAGpB,uBAAqC;AAC9C,WAAO,KAAK;AAAA;AAAA,EAGP,OAAO,OAA0C;AACtD,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,qBAAqB,iBAAiB,WACxC,KAAK,qBAAqB,aAAa,SACvC;AAEJ,YAAM,SAAS,AAAG,yBAAQ,oBAAoB,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,SAAS,KAAK,mBAAmB,MAAM,IAAI;AACzG,YAAM,MAAM,oBAAoB,QAAQ,OAAO,GAAG,KAAK;AACvD,YAAM,SAAS,oBAAoB,QAAQ,OAAO,GAAG;AACrD,aAAO,CAAE,KAAK;AAAA;AAAA;AAAA,EAIX,aAAa,OAA0C;AAC5D,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,CAAE,KAAK,UAAW,KAAK,OAAO;AACpC,aAAO,CAAE,KAAK,QAAQ,AAAG,yBAAQ;AAAA;AAAA;AAAA,QAIxB,QAAQ,OAAsC;AACzD,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,oBAAoB,OAA8E;AAC7G,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa;AAEpC,UAAM,OAAO,AAAG,yBAAQ,IAAI;AAC5B,UAAM,UAAU,AAAG,yBAAQ,IAAI;AAC/B,UAAM,sBAAsB,KAAK,IAAI,CAAC,WAAW,MAAO;AAAA,MACtD;AAAA,MACA,cAAc,QAAQ;AAAA;AAGxB,UAAM,qBAAqB,MAAM,QAAQ,IACvC,oBAAoB,IAAI,OAAO,CAAE,WAAW,kBAAmB;AAC7D,YAAM,MAAO,UAAU,WAAY;AACnC,YAAM,WAAY,aAAa,WAAY;AAC3C,YAAM,SAAS,WAAW;AAC1B,YAAM,SAAS,SAAS,OAAO,OAAO,OAAO;AAC7C,YAAM,oBAAoB,SAAS,WAAY,IAAI;AAEnD,gBAAU;AACV,mBAAa;AACb,aAAO,CAAE,KAAK,QAAQ;AAAA;AAG1B,QAAI,IAAI;AACR,QAAI,OAAO;AAEX,WAAO,SAAS,eAAe,qBAAiD,mBAAmB;AAAA;AAAA,EAG3F,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGF,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,qBAAqB,QAAQ;AAClC,UAAM,QAAQ;AAAA;AAAA,EAGT,qBAAqB,SAAuB;AACjD,UAAM,CAAE,QAAQ,iBAAkB,KAAK,wBAAwB;AAC/D,SAAK,UAAU;AACf,SAAK,iBAAiB;AAAA;AAAA,EAGjB,wBAAwB,SAAuB;AACpD,WAAO,eAAc;AAAA;AAAA,EAGb,2BAA2B,WAA8B;AACjE,UAAM,CAAE,qBAAqB,iBAAkB,mBAAmB;AAElE,SAAK,
qBAAqB,kBAAkB;AAE5C,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,UAAM,uBAAwB,MAAM,IAAI,IAAM,OAAM,IAAI;AAExD,UAAM,0BAA0B,QAAQ,MAAM,GAAG,QAAQ,SAAS;AAClE,UAAM,oBAAoB,QAAQ,MAAM,QAAQ,SAAS;AAEzD,SAAK,qBAAqB,eAAe;AACzC,WAAO,KAAK,wBAAwB;AAAA;AAAA;;;AC5GjC,0CAGG,cAAgC;AAAA,EACjC,YAAY,QAAqB,WAAmB,oBAAgD;AACzG,UAAM,kBAAkB,mBAAmB,IAAI,CAAC,CAAE,OAAO,YAAa;AACpE,YAAM,SAAQ,YAAY,KAAK,IAAI,QAAQ;AAC3C,aAAO;AAAA,QACL,OAAO,QAAQ;AAAA,QACf,QAAQ,SAAS;AAAA;AAAA;AAIrB,UAAM,YAAY,gBAAgB;AAElC,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,0BAA0B,CAAC,OAAe,UAAkB,AAAG,uBAAM,CAAC,AAAG,sBAAK,CAAC,KAAK,OAAO,YAAY,AAAG,sBAAK,CAAC,KAAK,OAAO,aAAa,GAAG,KAAK,GAAG,KAAK;AAG/J,YAAM,aAAa,CAAC,UAAkB,SAAoD;AACxF,cAAM,CAAE,OAAO,UAAW,gBAAgB;AAC1C,eAAO,KAAK,OAAO,UAAU,KAAK,IAAI,QAAQ,UAAU,IAAI;AAAA;AAG9D,YAAM,cAAc,CAAC,aAAqB,WAAW,UAAU,CAAC,GAAG,MAAM,IAAI;AAC7E,YAAM,cAAc,CAAC,aAAqB,WAAW,UAAU,CAAC,GAAG,MAAM,IAAI;AAE7E,YAAM,kBAAkB,OACrB,IAAI,AAAG,sBAAK,CAAC,WAAW,MAAM,WAAW,YACzC,IAAI,AAAG,uBAAM,MAAM,KAAK,MAAM,YAAY,CAAC,GAAG,aAAa,wBAC1D,YAAY,WACZ,YAAY,cAEb,IAAI,AAAG,uBAAM,MAAM,KAAK,MAAM,YAAY,CAAC,GAAG,aAAa,wBAC1D,gBAAgB,UAAU,OAC1B,gBAAgB,UAAU;AAG9B,aAAO;AAAA;AAAA;AAAA,EAIJ,aAAa,OAA8B;AAChD,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,MAAM,KAAK,OAAO;AACxB,aAAO,KAAK,YACV,KACA,MAAM,WACN,MAAM,gBAAgB,IAAI,CAAC,CAAC,QAAQ,WAAY,EAAE,QAAQ;AAAA;AAAA;AAAA,QAKnD,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,gBAAgB,OAAgE;AAC3F,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,kBAAkB,AAAG,sBACzB,MAAM,AAAG,yBAAQ,KAAK,aAAa;AAGrC,UAAM,oBAAoB,MAAM,QAAQ,IAAI,gBAAgB,IAC1D,OAAO,gBAAgB,aAAa;AAClC,YAAM,iBAAiB,MAAM,KAAK,eAAe;AACjD,YAAM,UAAU,eAAe,OAAO,CAAC,GAAG,MAAM,OAAO;AACvD,YAAM,UAAU,eAAe,OAAO,CAAC,GAAG,MAAM,CAAC,OAAO;AAExD,aAAO,IAAI,gBACT,MAAM,IAAI,KAAK,GAAG,IAAI,CAAC,GAAG,MAAM,IAAI,MAAM,QAAQ,IAAc,QAAQ,MACxE;AAAA,QACE,QAAQ,SAAS,eAAe;AAAA,QAChC,OAAO,SAAS,cAAc;AAAA;AAAA;AAMtC,oBAAgB,QAAQ,CAAC,MAAM,EAAE;AAEjC,WAAO,SAAS,eAAe,oBAAyC,kBAAkB;AAAA;AAAA,EAGlF,2BAAmC;AAC3C,WAAO;AAAA;AAAA;;;AC1FJ,sCAAgC,sBAAkD;AAAA,EACvF,YAAY,uBAA6C,IAAI,wBAAwB;AACnF,UAAM,qBAAqB;AAAA;AAAA,EAGnB,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA;;;ACRJ,wCACL,WAC2E;AAC3E,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,WAAW;AAEjC,QAAM,SAAS;AAAA,IACb,QAAQ,yBAAyB,UAAU;AAAA,IAC3C,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA;AAGnC,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ;AAAA;;;ACnBZ,2BAA2B,SAAkG;AAClI,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,gBAAgB;AAEtC,QAAM,SAAS,yBAAyB,GAAG,IAAI,UAAU;AACzD,QAAM,SAAS,yBAAyB,IAAI,IAAI;AAChD,QAAM,SAAS,yBAAyB,IAAI,KAAK;AAEjD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,CAAE,QAAQ,QAAQ;AAAA;AAAA;;;AChBvB,6CAAuC,cAA+G;AAAA,EAC3J,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,YAAY,YAAY,OAAO,QAAQ;AACjD,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,AAAG,yBAAQ,KAAK,CAAC,IAAI,KAAK,CAAC,GAAG,IAAI;AAExC,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,+BAA+B;AAAA;AAAA,EAG9B,cAAc,SAAuB;AAC7C,WAAO,kBAAkB;AAAA;AAAA;;;AC7CtB,0CAAoC,sBAAsD;AAAA,EAC/F,YAAY,uBAAiD,IAAI,4BAA4B;AAC3F,UAAM,yBAAyB;AAAA;AAAA,EAGvB,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA;;;ACVJ,oCAA8B,kBAAkB;AAAA;;;ACAhD,eAAe,GAAgB,QAAuC;AAC3E,SAAO,AAAG,qBAAI,AAAG,qBAAI,GAAG,OAAO,UAAU,OAAO;AAAA;;;ACAlD,oBACE,GACA,Q
ACA,SACA,UACA,UAA4B,QACf;AACb,QAAM,CAAE,SAAS,QAAS,OAAO;AAEjC,MAAI,MAAM,AAAG,wBAAO,GAAG,SAAS,SAAS;AACzC,QAAM,AAAG,qBAAI,KAAK;AAClB,QAAM,MAAM,KAAK,OAAO;AACxB,SAAO,WAAW,AAAG,sBAAK,OAAO;AAAA;AAG5B,eAAc,GAAgB,QAAyB;AAC5D,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI;AAAA;AAG/B,oBAAoB,GAAgB,QAAyB;AAClE,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI;AAAA;AAG/B,kBAAkB,GAAgB,QAAyB;AAChE,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI,MAAM;AAAA;;;ACvB5C,4BAA2B,gBAAwC,eAA+B;AAChG,+BAA6B,iBAAyB,YAAoB,YAAiC;AACzG,UAAM,UAAU,eAAe;AAC/B,UAAM,QAAQ,QAAQ,SAAU,cAAa,aAAa;AAE1D,QAAI,QAAQ,QAAQ;AAClB,YAAM,IAAI,MAAM,+BAA+B,0BAA0B,QAAQ,uBAAuB,2BAA2B;AAAA;AAGrI,WAAO,AAAG,sBACR,MAAM,AAAG,2BACP,AAAG,0BAAS,SAAS,CAAC,YAAY,OAAO,YAAY,cACrD,CAAC,GAAG,GAAG,GAAG;AAAA;AAKhB,6BACE,iBACA,YACA,YACA,cACY;AACZ,UAAM,UAAU,oBAAoB,iBAAiB,YAAY;AACjE,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG;AAGlB,WAAO,CAAE,SAAS;AAAA;AAGpB,mCAAiC,YAAoB,cAAwC;AAC3F,UAAM,UAAU,AAAG,0BAAS,eAAe;AAC3C,UAAM,SAAS,AAAG,0BAAS,eAAe;AAE1C,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAIJ,kCACE,iBACA,YACA,YACA,cACiB;AACjB,UAAM,QAAO,kBAAkB,iBAAiB,YAAY,YAAY,GAAG;AAC3E,UAAM,SAAQ,wBAAwB,YAAY,GAAG;AAErD,WAAO,CAAE,aAAM;AAAA;AAGjB,sCACE,iBACA,YACA,YACA,cACA,SAAkB,OACG;AACrB,UAAM,QAAQ,uBAAwB,UAAS,MAAM,KAAK,iBAAiB,YAAY,YAAY,GAAG;AACtG,UAAM,SAAQ,uBAAuB,iBAAiB,YAAY,YAAY,GAAG;AAEjF,WAAO,CAAE,OAAO;AAAA;AAGlB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAA6E;AACzG,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AAEtC,QAAM,cAAc,uBAAuB,MAAM,IAAI,GAAG;AACxD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AACzD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AACzD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AAEzD,QAAM,cAAc,2BAA2B,OAAO,IAAI,GAAG,eAAe;AAC5E,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAC1D,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAC1D,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAE1D,QAAM,eAAe,2BAA2B,QAAQ,KAAK,GAAG,gBAAgB;AAChF,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAE7D,QAAM,eAAe,2BAA2B,QAAQ,KAAK,GAAG,gBAAgB;AAChF,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,mBAAmB,2BAA2B,QAAQ,KAAK,GAAG;AAEpE,QAAM,KAAK,AAAG,sBACZ,MAAM,AAAG,2BAAU,AAAG,0BAAS,eAAe,MAAM,MAAM,CAAC,KAAK,OAAO,CAAC,GAAG;AAE7E,gBAAc,KAAK,CAAE,WAAW;AAEhC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,SAAO,CAAE,QAAQ;AAAA;;;AC5InB,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,mCAAiC,QAAkC;AACjE,UAAM,UAAU,mBAAmB,GAAG,wBAAwB;AAC9D,UAAM,SAAS,mBAAmB,GAAG,uBAAuB;AAE5D,WAAO,CAAE,SAAS;AAAA;AAGpB,kCAAgC,QAAiC;AAC/D,UAAM,UAAU,mBAAmB,GAAG,uBAAuB;AAC7D,UAAM,OAAO,mBAAmB,GAAG,oBAAoB;AACvD,UAAM,SAAQ,wBAAwB;AAEtC,WAAO,CAAE,MAAM,CAAE,SAAS,OAAQ;AAAA;AAGpC,sCAAoC,QAAqC;AACvE,WAAO;AAAA,MACL,OAAO,uBAAuB,GAAG;AAAA,MACjC,OAAO,uBAAuB,GAAG;AAAA;AAAA;AAIrC,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,QAAM,cAAc,uBAAuB;AAC3C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAE5C,QAAM,cAAc,2BAA2B;AAC/C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAE5C,QAAM,eAAe,2BAA2B;AAChD,QAAM,YAAY,2BAA2B;AAC7C,QAAM,YAAY,2BAA2B;AAE7C,QAAM,eAAe,2BAA2B;AAChD,QAAM,YAAY,2BAA2B;AAC7C,QAAM,YAAY,2BAA2B;AAC7C,QAAM,mBAAmB,2BAA2B;AAEpD,QAAM,CAAE,MAAO;AACf,gBAAc,KAAK,CAAE,cAAc,MAAM,WAAW;AAEpD,MAAI,CAAC,WAAW,KAAK;AACnB,UAAM,IAAI,MAAM,yDAAyD;
AAAA;AAG3E,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,6BAA2B,WAAW;AAEtC,SAAO,CAAE,QAAQ;AAAA;;;ACzFZ,kBAAkB,GAAgB,QAA0C;AACjF,MAAI,MAAM,MAAK,GAAG,OAAO;AACzB,QAAM,WAAW,KAAK,OAAO;AAC7B,QAAM,AAAG,qBAAI,KAAK;AAClB,QAAM,AAAG,sBAAK;AACd,SAAO;AAAA;AAGF,sBAAsB,GAAgB,QAA0C;AACrF,MAAI,MAAM,SAAS,GAAG,OAAO;AAC7B,QAAM,WAAW,KAAK,OAAO;AAE7B,MAAI,SAAS,AAAG,yBAAQ,GAAG,GAAG,GAAG;AACjC,QAAM,SAAQ,AAAG,uBAAkB,OAAO;AAC1C,QAAM,QAAQ,OAAO,MAAM,OAAO,IAAI,MAAM;AAC5C,QAAM,gBAAgB,OAAO,MAAM,OAAO,IAAI,MAAM,MAAM,OAAO,MAAM,OAAO,IAAI,MAAM;AAExF,MAAI,eAAe;AACjB,UAAM,YAAY,CAAC,GAAG,IAAI;AAC1B,cAAU,KAAK;AACf,UAAM,SAAS,AAAG,uBAAkB;AACpC,UAAM,AAAG,wBAAO,CAAC,KAAK,SAAS;AAE/B,UAAM,YAAY,CAAC,GAAG,IAAI;AAC1B,cAAU,KAAK;AACf,UAAM,SAAS,AAAG,uBAAkB;AACpC,UAAM,AAAG,wBAAO,CAAC,KAAK,SAAS;AAAA;AAGjC,WAAS,QAAQ,AAAG,wBAAO,CAAC,QAAQ,SAAQ,KAAK;AACjD,QAAM,AAAG,qBAAI,QAAQ;AAErB,QAAM,AAAG,sBAAK;AACd,SAAO;AAAA;;;AC3BF,uCAAiC,cAAyB;AAAA,EAC/D,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAE5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,SAAS,YAAY,OAAO;AACtC,YAAM,AAAG,yBAAQ,KAAK,GAAG,GAAG;AAE5B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,aAAa,KAAK,OAAO;AAE/B,YAAM,YAAY,IAAI,KAAK,CAAC,GAAG;AAC/B,YAAM,iBAAiB,AAAG,wBAAO,WAAW,OAAO;AAEnD,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,sBAAsB,OAAwD;AA7D7F;AA8DI,QAAI,qCAAO,UAAP,mBAAc,KAAK,CAAC,QAAQ,OAAO;AAAI,aAAO,IAAI,aAAa;AACnE,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,wBAAwB,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,aAAa;AACzE,UAAM,0BAA0B,MAAM,QAAQ,IAAI,sBAAsB,IAAI,CAAC,MAAM,EAAE;AACrF,0BAAsB,QAAQ,CAAC,MAAM,EAAE;AACvC,WAAO,SAAS,eAAe,0BAA0B,wBAAwB;AAAA;AAAA,EAGzE,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,eAAc;AAAA;AAAA;;;AC3ElB,kCAAkC,SAAuB;AAC9D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACHF,kCAGL,WACA,YAC6B;AAC7B,QAAM,YAAY,CAAE;AACpB,SAAO,IAAK,cAAc;AAAA;;;ACPrB,mBAAmB,KAA8B;AACtD,SAAO,OAAO,IAAI,QAAQ;AAAA;AAGrB,uBAGL,WACA,KACkB;AAClB,QAAM,YAAY,CAAE;AACpB,SAAO,IAAK,cAAc;AAAA;;;ACPrB,sBAAsB,KAAiC;AAC5D,SAAQ,KAAI,WAAW,OAAO,QAAQ,IAAI,WAAW,OAAO,WACvD,mBAAmB,IAAI;AAAA;AAGvB,0BAGL,WACA,QACA,mBACqB;AACrB,QAAM,YAAY,CAAE,QAAQ;AAC5B,SAAO,IAAK,cAAc;AAAA;;;AChB5B,4BAA2B,gBAAwC,eAA+B;AAChG,sCAAoC,aAAqB,cAAuD;AAC9G,UAAM,UAAU,AAAG,0BAAS,eAAe,IAAI,IAAI,cAAc,CAAC,GAAG,GAAG,aAAa;AACrF,UAAM,mBAAmB,AAAG,0BAAS,eAAe;AACpD,UAAM,oBAAoB,AAAG,0BAAS,eAAe;AACrD,UAAM,kBAAkB,AAAG,0BAAS,eAAe;AACnD,UAAM,sBAAsB,AAAG,0BAAS,eAAe;AAEvD,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG,kCAChB,CAAE,WAAW,GAAG,mCAChB,CAAE,WAAW,GAAG,iCAChB,CAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,6BACE,YACA,aACA,YACA,cACA,iBACY;AACZ,UAAM,UAAU,AAAG,0BACjB,eAAe,aAAa,cAAc,aAAa,aACvD,CAAC,YAAY,YAAY,YAAY;AAEvC,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,CAAE,WAAW,GAAG,yBAChB,CAAE,WAAW,GAAG,gBAAgB,kBAAkB,sBAAsB;AAG1E,WAAO,CAAE,SAAS;AAAA;AAGpB,sCACE,YACA,aACA,YACA,cACqB;AACrB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,QACE,kBAAkB,YAAY,aAAa,YAAY,cAAc;AAEzE,WAAO;AAAA,
MACL;AAAA,MACA,mBAAmB;AAAA;AAAA;AAIvB,iCACE,YACA,aACA,cAC4B;AAC5B,UAAM,iBAAiB,2BAA2B,YAAY,GAAG;AACjE,UAAM,iBAAiB,2BAA2B,YAAY,aAAa,GAAG,GAAG;AAEjF,WAAO,CAAE,gBAAgB;AAAA;AAG3B,sCAAwD;AACtD,UAAM,SAAS,2BAA2B,GAAG,IAAI,GAAG;AACpD,UAAM,SAAS,sBAAsB,IAAI,IAAI;AAC7C,UAAM,SAAS,sBAAsB,IAAI,KAAK;AAC9C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,UAAU,sBAAsB,KAAK,KAAK;AAChD,UAAM,UAAU,sBAAsB,KAAK,KAAK;AAChD,UAAM,UAAU,sBAAsB,KAAK,MAAM;AACjD,UAAM,UAAU,sBAAsB,MAAM,MAAM;AAClD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,0CAA+D;AAC7D,UAAM,SAAS,2BAA2B,MAAM,KAAK,GAAG;AACxD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,IAAI,GAAG;AACtD,UAAM,SAAS,2BAA2B,IAAI,KAAK,GAAG;AACtD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,GAAG,GAAG;AACvD,UAAM,2BAA2B,kBAAkB,MAAM,IAAI,GAAG;AAChE,UAAM,oBAAoB,kBAAkB,MAAM,IAAI,GAAG;AACzD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AAExD,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAA6E;AACzG,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAC1B,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AACtC,QAAM,cAAc;AACpB,QAAM,mBAAmB;AACzB,QAAM,YAAY,AAAG,0BACnB,eAAe,OAAO,IACtB,CAAC,GAAG,MAAM;AAEZ,QAAM,eAAe;AAAA,IACnB;AAAA;AAEF,gBAAc,KAAK,CAAE,WAAW;AAChC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL,QAAQ;AAAA,MACN;AAAA,MACA;AAAA,MACA;AAAA;AAAA,IAEF;AAAA;AAAA;;;AC9MJ,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,sCAAoC,QAAgB,KAAa,cAA2C;AAC1G,UAAM,UAAU,mBAAmB,GAAG,iBAAiB,yBAAyB,GAAG,GAAG;AACtF,UAAM,oBAAoB,mBAAmB,GAAG,iBAAiB,uCAAuC,GAAG,GAAG;AAC9G,WAAO,CAAE,SAAS;AAAA;AAGpB,iCAA+B,KAAyC;AACtE,UAAM,eAAe,oBAAoB;AACzC,UAAM,sBAAsB,sBAAsB;AAClD,UAAM,4BAA4B,GAAG;AACrC,UAAM,4BAA4B,GAAG;AAErC,UAAM,UAAU,mBAAmB,GAAG,yCAAyC,GAAG,GAAG;AACrF,UAAM,mBAAmB,mBAAmB,GAAG,uCAAuC,GAAG,GAAG;AAC5F,UAAM,oBAAoB,mBAAmB,GAAG,sCAAsC,GAAG,GAAG;AAC5F,UAAM,kBAAkB,mBAAmB,GAAG,6CAA6C,GAAG,GAAG;AACjG,UAAM,sBAAsB,mBAAmB,GAAG,iDAAiD,GAAG,GAAG;AAEzG,WAAO;AAAA,MACL,gBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA;AAAA,MAEF,gBAAgB,2BAA2B,eAAe,KAAK;AAAA;AAAA;AAInE,sCAAwD;AACtD,WAAO;AAAA,MACL,QAAQ,2BAA2B,eAAe,GAAG;AAAA,MACrD,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AA
AA,MAC9B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA;AAAA;AAInC,6BAA2B,QAAgB,cAAkC;AAC3E,UAAM,UAAU,mBAAmB,GAAG,kBAAkB,GAAG,GAAG;AAC9D,UAAM,OAAO,mBAAmB,GAAG,iBAAiB,GAAG,GAAG;AAC1D,WAAO,CAAE,SAAS;AAAA;AAGpB,qCAAmC,KAAkC;AACnE,UAAM,yBAAyB,kBAC7B,2BAA2B,4BAC3B,kCAAkC;AAEpC,UAAM,kBAAkB,kBACtB,2BAA2B,sBAC3B,kCAAkC;AAEpC,WAAO,CAAE,wBAAwB;AAAA;AAGnC,0CAA+D;AAC7D,WAAO;AAAA,MACL,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA;AAAA;AAI/C,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACsD;AACtD,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AACjC,QAAM,YAAY,UAAU;AAC5B,gBAAc,KAAK,CAAE,cAAc,oBAAoB,WAAW;AAClE,MAAI,CAAC,WAAW,YAAY;AAC1B,UAAM,IAAI,MAAM,yEAAyE;AAAA;AAG3F,QAAM,SAAS;AAAA,IACb,aAAa;AAAA,IACb,kBAAkB;AAAA,IAClB,cAAc;AAAA,MACZ;AAAA;AAAA;AAIJ,6BAA2B,WAAW;AACtC,SAAO,CAAE,QAAQ;AAAA;;;ACxHZ,4BAA4B,GAAgB,QAA6B,SAA2B;AACzG,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,wBAAO,GAAG,OAAO,SAAS,SAAS;AAChD,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO,AAAG,6BAAY,KAAK,GAAG;AAAA;AAAA;;;ACHlC,IAAM,UAAU;AAEhB,4BAA4B,GAAgB,QAAyC,SAA2B;AAC9G,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,iCAAgB,GAAG,OAAO,SAAS,SAAS;AACzD,UAAM,AAAG,2BACP,KACA,OAAO,iBACP,OAAO,qBACP,OAAO,mBACP,OAAO,kBACP;AAEF,WAAO,AAAG,6BAAY,KAAK,GAAG;AAAA;AAAA;AAIlC,+BAA+B,UAAoC;AACjE,SAAO,CAAC,GAAG,GAAG,GAAG,IAAI,KAAK,CAAC,QAAQ,QAAQ,YAAY,CAAC,GAAG,KAAK,CAAC,GAAG;AAAA;AAG/D,qBAAqB,GAAgB,QAA4B;AACtE,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI;AACJ,QAAI,MAAM,mBAAmB,GAAG,OAAO,QAAQ,CAAC,GAAG;AAEnD,UAAM,iBAAiB;AAAA,MACrB,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA;AAGT,mBAAe,QAAQ,CAAC,OAAO,MAAM;AACnC,YAAM,WAAW,IAAI;AACrB,YAAM,uBAAuB,sBAAsB;AACnD,YAAM,mBAAmB,KAAK,MAAM,gBAAgB;AACpD,YAAM,mBAAmB,KAAK,MAAM,gBAAgB,CAAC,GAAG;AACxD,UAAI,aAAa;AAAI,iBAAS;AAAA;AAGhC,QAAI,WAAW,MAAM;AACnB,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAAA;;;AC3DN,aAAa,OAAoB,GAAW,GAAW;AACrD,QAAM,YAAY,MAAM;AACxB,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAS,SAAQ,SAAU,SAAQ;AACzC,QAAM,QAAS,SAAQ,SAAU,SAAQ;AACzC,MAAI,SAAS,KAAK,SAAS;AAAG,WAAO;AACrC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,mBAAmB,kBAAkB,KAAO,KAAK,IAAI,mBAAmB,kBAAkB;AAC5H,SAAO,mBAAoB,SAAQ,QAAQ;AAAA;AAGtC,4BACL,OACA,QACA,eACA,cACA,gBACU;AACV,QAAM,WAAW,MAAM,MAAM;AAC7B,QAAM,aAAa,KAAK,IAAI,eAAe;AAE3C,QAAM,aAAa,OAChB,IAAI,CAAC,OAAO,aAAc,EAAE,OAAO,YACnC,OAAO,CAAC,MAAM,EAAE,QAAQ,gBACxB,KAAK,CAAC,IAAI,OAAO,GAAG,QAAQ,GAAG;AAElC,QAAM,eAAe,CAAC,MAAe,KAAK,eAAe,IAAI;AAC7D,QAAM,WAAqB;AAE3B,aAAW,QAAQ,CAAC,MAAM;AACxB,QAAI,SAAS,UAAU;AAAY;AACnC,UAAM,gBAAgB,EAAE;AACxB,aAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,EAAE,GAAG;AAC7C,YAAM,OAAM,IAAI
,OAAO,EAAE,UAAU,SAAS;AAC5C,UAAI,SAAQ;AAAK;AACjB,QAAE,SAAS,aAAa;AACxB,UAAI,EAAE,SAAS;AAAgB;AAAA;AAEjC,QAAI,kBAAkB,EAAE,OAAO;AAC7B,eAAS,KAAK,EAAE;AAAA;AAAA;AAGpB,SAAO;AAAA;;;AClDT,2CAA2C,GAAgB;AACzD,QAAM,MAAM,AAAG,yBAAQ,AAAG,2BAAU,GAAG,CAAC,GAAG;AAE3C,QAAM,QAAQ;AAAA,IACZ,AAAG,qBAAI,IAAI,IAAI,IAAI;AAAA,IACnB,AAAG,qBAAI,IAAI,IAAI,IAAI;AAAA;AAErB,QAAM,UAAU;AAAA,IACd,AAAG,qBAAI,IAAI,IAAI,AAAG,qBAAI,MAAM,IAAI;AAAA,IAChC,AAAG,qBAAI,IAAI,IAAI,AAAG,qBAAI,MAAM,IAAI;AAAA;AAElC,SAAO,CAAE,OAAO;AAAA;AAGlB,0BAA0B,IAAiB,IAAiB;AAC1D,QAAM,CAAE,OAAO,WAAY,kCAAkC;AAE7D,QAAM,MAAM,AAAG,yBAAQ,AAAG,2BAAU,IAAI,CAAC,GAAG;AAC5C,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK;AACrE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK,QAAQ;AACtE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK;AACrE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK,QAAQ;AAEtE,SAAO,AAAG,2BACR,AAAG,uBAAM;AAAA,IACP,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,MAEnB,CAAC,GAAG;AAAA;AAID,qBAAqB,gBAA6B,kBAA+B,QAA2B;AACjH,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,YAAY,eAAe,MAAM;AAEvC,QAAI,QAAQ,iBACV,AAAG,yBAAQ,AAAG,sBAAK,OAAO,WAAW,CAAC,WAAW,GAAG,KAAK,CAAC,IAAI,KAC9D,AAAG,yBAAQ,gBAAgB,CAAC,IAAI;AAElC,YAAQ,AAAG,yBAAQ,OAAO,CAAC,WAAY,MAAM,MAAM,KAAK,WAAY;AAEpE,UAAM,mBAAmB,AAAG,yBAAQ,AAAG,uBAAM,kBAAkB,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI,IAAI;AACnF,QAAI,SAAS,AAAG,uBAAM,kBAAkB,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI,IAAI;AAE5D,aAAS,AAAG,yBAAQ,QAAQ,CAAC,WAAW,OAAO,MAAM;AAErD,UAAM,eAAe,AAAG,yBAAQ;AAChC,UAAM,gBAAgB,AAAG,yBAAQ;AAEjC,WAAO,CAAE,OAAO,cAAc,QAAQ;AAAA;AAAA;;;ACnDnC,4BACL,GACA,QACA;AACA,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,YAAY,EAAE,MAAM;AAC1B,UAAM,wBAAwB,AAAG,yBAC/B,UAAU,GAAG,OAAO,yBACpB,CAAC,WAAW,IAAI,GAAG;AAErB,UAAM,kBAAkB,AAAG,yBACzB,UAAU,GAAG,OAAO,kBACpB,CAAC,WAAW,IAAI;AAElB,WAAO,CAAE,uBAAuB;AAAA;AAAA;;;ACb7B,yBACL,GACA,QACA,QACA;AACA,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,QAAQ,mBAAmB,GAAG,OAAO,QAAQ,CAAC,GAAG;AACvD,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,SAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,QAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAE3D,UAAM,iBAAiB,mBAAmB,QAAQ,OAAO;AACzD,UAAM,iBAAiB,mBAAmB,GAAG,OAAO;AACpD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AAExD,UAAM,iBAAiB,AAAG,wBAAO;AAAA,MAC/B,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,OACd;AAEH,UAAM,mBAAmB,AAAG,wBAAO;AAAA,MACjC,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,OACd;AAEH,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAAA;;;AC3CC,kCAA4B;AAAA,EAOjC,YAAY,CAAE,eAAe,cAAuC,IAAI;AAN9D,iBAAgB;AAOxB,SAAK,iBAAiB,iBAAiB;AACvC,SAAK,cAAc,cAAc;AAEjC,QAAI,OAAO,KAAK,mBAAmB,YAAY,KAAK,kBAAkB,KAAK,KAAK,kBAAkB,GAAG;AACnG,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,QAAI,OAAO,KAAK,gBAAgB,UAAU;AACxC,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAAA;AAAA,MAIxB,gBAAwB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEtC,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACZlC,mCAA6B,cAAyB;AAAA,EAC3D,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAAiB;AACnC,UAAM,CAAE,UAAW;AACnB,QAAI,CAAC;AAAQ,YAAM,IAAI,MAAM;AAC7B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,QAAQ;AAC7D,YAAM,IAAI,AAAG,qBAAI,AAAG,qBAAI,aAAa,QAAQ;AAC7C,YAAM,WAAW,YAAY,GAAG,OAAO;AACvC,YAAM,CAAE,gBAAgB,oBAAqB,gBAAgB,SAAS,KAAK,SAAS,
QAAQ,OAAO;AACnG,aAAO,YAAY,gBAAgB,kBAAkB,OAAO;AAAA;AAAA;AAAA,QAInD,QAAQ,OAAkB;AACrC,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,YAAY,OAAkB,UAAkC,IAA8B;AACzG,UAAM,CAAE,YAAY,iBAAkB,IAAI,sBAAsB;AAChE,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,CAAE,OAAO,QAAQ,QAAQ,WAAY,KAAK,aAAa;AAC7D,UAAM,QAAQ,OAAO;AACrB,UAAM,SAAS,QAAQ;AACvB,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,aAAO,GAAG;AACV,cAAQ,GAAG;AAAA;AAEb,UAAM,aAAa,MAAM,KAAK,OAAO;AACrC,UAAM,eAAe;AACrB,UAAM,UAAU,mBAAkB,OAAO,YAAwB,YAAY,cAAc;AAC3F,UAAM,eAAe,SAAS,2BAA2B;AACzD,UAAM,YAAY,SAAS;AAC3B,UAAM,OAAO,YAAY,aAAa;AACtC,UAAM,OAAO,YAAY,aAAa;AACtC,UAAM,YAAY,MAAM;AACxB,UAAM,UAAU,QACb,IAAI,CAAC,QAAQ;AACZ,YAAM,CAAC,KAAK,UAAU;AAAA,QACpB,KAAK,IAAI,GAAG,UAAU,KAAK;AAAA,QAC3B,KAAK,IAAI,GAAK,UAAU,KAAK;AAAA,QAC7B,IAAI,CAAC,QAAQ,MAAM;AACrB,YAAM,CAAC,MAAM,SAAS;AAAA,QACpB,KAAK,IAAI,GAAG,UAAU,KAAK;AAAA,QAC3B,KAAK,IAAI,GAAK,UAAU,KAAK;AAAA,QAC7B,IAAI,CAAC,QAAQ,MAAM;AACrB,aAAO,IAAI,cACT,WAAW,MACX,IAAI,KAAK,MAAM,KAAK,QAAQ,MAAM,SAAS,MAC3C,CAAE,QAAQ,SAAS,eAAe,IAAI,OAAO,SAAS,cAAc;AAAA;AAG1E,UAAM;AACN,WAAO;AACP,WAAO;AAAA;AAAA,EAGC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,eAAc;AAAA;AAAA;;;AC/ElB,8BAA8B,SAAuB;AAC1D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;AAGF,gCAAgC,SAAuB;AAC5D,SAAO,qBAAqB;AAAA;AAIvB,qCAA+B,eAAe;AAAA;;;ACd9C,IAAM,gBAAgB;AAEtB,IAAM,cAAc;AAAA,EACzB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,SAAS;AAAA,EACnB,IAAI,MAAM,SAAS;AAAA,EACnB,IAAI,MAAM,QAAQ;AAAA,EAClB,IAAI,MAAM,SAAS;AAAA;AAGd,IAAM,wBAAwB;AAAA,EACnC,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA;AAGf,IAAM,qBAA+C,CAAC,SAAS,SAAS;AAExE,IAAM,qBAAqB;AAC3B,IAAM,oCAAoC;;;ACVjD,IAAM,WAAW,CAAC,QAAa,OAAO,QAAQ;AAEvC,wBAAwB,QAAa;AAC1C,MAAI,CAAC,QAAQ;AACX,UAAM,IAAI,MAAM,mBAAmB;AAAA;AAGrC,MAAI,OAAO,OAAO,uBAAuB,WAAW;AAClD,UAAM,IAAI,MAAM,wDAAwD,OAAO;AAAA;AAGjF,MAAI,CAAC,SAAS,OAAO,iBAAiB,OAAO,eAAe,KAAK,OAAO,eAAe,GAAK;AAC1F,UAAM,IAAI,MAAM,gEAAgE,OAAO;AAAA;AAGzF,MACE,CAAC,MAAM,QAAQ,OAAO,YACnB,CAAC,OAAO,QAAQ,UAChB,CAAC,OAAO,QAAQ,MAAM,CAAC,MAAW,OAAO,MAAM,WAClD;AACA,UAAM,IAAI,MAAM,kEAAkE,KAAK,UAAU,OAAO;AAAA;AAG1G,MACE,CAAC,MAAM,QAAQ,OAAO,YACnB,CAAC,OAAO,QAAQ,UAChB,CAAC,OAAO,QAAQ,IAAI,CAAC,MAAW,KAAK,IAAI,MAAM,CAAC,MAAW,SAAS,EAAE,MAAM,SAAS,EAAE,KAC1F;AACA,UAAM,IAAI,MAAM,wEAAwE,KAAK,UAAU,OAAO;AAAA;AAGhH,MAAI,OAAO,WACT,EAAC,MAAM,QAAQ,OAAO,YACnB,OAAO,QAAQ,WAAW,KAC1B,CAAC,OAAO,QAAQ,MAAM,YACxB;AACD,UAAM,IAAI,MAAM,8EAA8E,KAAK,UAAU,OAAO;AAAA;AAAA;;;AC/CjH,eAAe,GAA6B;AACjD,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,MAAM,AAAG,qBAAI,GAAG,AAAG,wBAAO;AAChC,WAAO,AAAG,qBAAI,AAAG,sBAAK,AAAG,qBAAI,GAAG,OAAO;AAAA;AAAA;;;ACApC,2BAA2B,GAAgB,QAAwC;AACxF,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,qBAAI,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG;AACjD,UAAM,AAAG,wBAAO,KAAK,OAAO,KAAK,SAAS,CAAC,GAAG,IAAI;AAClD,UAAM,AAAG,qBAAI,KAAK,OAAO,GAAG;AAC5B,UAAM,AAAG,qBAAI,KAAK,OAAO,GAAG;AAC5B,UAAM,AAAG,qBAAI,KAAK,OAAO,KAAK;AAC9B,WAAO,MAAM;AAAA;AAAA;;;ACPV,iCAAgC,GAAgB,QAA0C;AAC/F,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,qBAAI,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG;AACjD,UAAM,AAAG,iCAAgB,KAAK,OAAO,kBAAkB,OAAO,kBAAkB,CAAC,GAAG,IAAI;AACxF,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO,MAAM;AAAA;AAAA;;;ACDjB,4BAA2B,gBAAwC,eAA+B;AAChG,QAAM,oBAAoB,yBAAyB,gBAAgB;AAEnE,kCAAgC,MAAc,cAAiC;AAC7E,UAAM,OAAM,AAAG,0BAAS,eAAe;AACvC,UAAM,UAAU,AAAG,0BAAS,eAAe;AAE3C,kBAAc,KACZ,CAAE,WAAW,GAAG,qBAChB,CAAE,WAAW,GAAG;AAElB,WAAO,CAAE,WAAK;AAAA;AAGhB,0CAAwC,YAAoB,aAAqB,cAAyC;AACxH,UAAM,QAAO,kBAAkB,YAAY,aAAa,GAAG,GAAG;AAC9D,UAAM,KAAK,uBAAuB,aAAa
,GAAG;AAClD,WAAO,CAAE,aAAM;AAAA;AAEjB,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,wBACL,SACA,QACA,iBACA,aACgE;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AACtC,MAAI;AAEJ,MAAI,OAAO,oBAAoB;AAC7B,UAAM,CAAC,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,MAAM;AAC7C,UAAM,QAAQ,OAAO,qBACjB,kBAAkB,IAAI,IAAI,GAAG,WAC7B,2BAA2B,IAAI,IAAI;AACvC,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,SAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,KAAK,2BAA2B,IAAI,IAAI,WAAW;AACjE,UAAM,QAAQ,KAAK,2BAA2B,IAAI,IAAI,WAAW;AACjE,UAAM,QAAQ,kBAAkB,MAAM,MAAM,IAAI,IAAI,iBAAiB,GAAG;AACxE,aAAS;AAAA,MACP;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA,SAErD;AACL,UAAM,CAAC,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,MAAM;AAC7C,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,SAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,kBAAkB,IAAI,IAAI,iBAAiB,GAAG;AAC5D,aAAS;AAAA,MACP;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAG5D,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAE1E,SAAO,CAAE,QAAQ;AAAA;;;AChFnB,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,kCAAgC,QAA2B;AACzD,UAAM,OAAM,mBAAmB,GAAG,cAAc;AAChD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,WAAO,CAAE,WAAK;AAAA;AAGhB,6BAA2B,QAA4B;AACrD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,CAAE,SAAS;AAAA;AAGpB,0CAAwC,QAAmC;AACzE,UAAM,QAAO,kBAAkB,GAAG;AAClC,UAAM,KAAK,uBAAuB,GAAG;AACrC,WAAO,CAAE,aAAM;AAAA;AAGjB,QAAM,6BAA6B,+BAA+B;AAClE,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACA,QACgE;AAChE,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,MAAI;AAEJ,MAAI,OAAO,oBAAoB;AAE7B,UAAM,aAAc,OAAO,eAAe,OAAO,YAAY,UAAU;AACvE,aAAS;AAAA,MACP,OAAO,OAAO,qBAAqB,kBAAkB,WAAW,2BAA2B;AAAA,MAC3F,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,aAAa,IAAI,2BAA2B,WAAW;AAAA,MAC9D,OAAO,aAAa,IAAI,2BAA2B,WAAW;AAAA,MAC9D,OAAO,kBAAkB;AAAA;AAAA,SAEtB;AACL,aAAS;AAAA,MACP,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,kBAAkB;AAAA;AAAA;AAI7B,6BAA2B,WAAW;AACtC,SAAO,CAAE,QAAQ;AAAA;;;AC7EZ,8BAAwB;AAAA,EAO7B,YAAY,CAAE,WAAW,kBAAuC,IAAI;AAN1D,iBAAgB;AAOxB,SAAK,aAAa,aAAa;AAC/B,SAAK,kBAAkB,kBAAkB;AAEzC,QAAI,OAAO,KAAK,eAAe,YAAY,KAAK,aAAa,OAAO,GAAG;AACrE,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,QAAI,OAAO,KAAK,oBAAoB,YAAY,KAAK,mBAAmB,KAAK,KAAK,mBAAmB,GAAG;AACtG,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAAA;AAAA,MAIxB,YAAoB;AAAE,WAAO,KAAK;AAAA;AAAA,MAElC,iBAAyB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACJtC,oCAA6B,cAAmC;AAAA,EAKrE,YAAY,QAA0B;AACpC,UAAM;AACN,mBAAe;AACf,SAAK,UAAU;AAAA;AAAA,MAGN,SAA2B;AACpC,WAAO,KAAK;AAAA;AAAA,MAGH,kBAA2B;AACpC,WAAO,KAAK,OAAO,mBAAmB,KAAK,OAAO,QAAQ,SAAS;AAAA;AAAA,MAG1D,kBAA0B;AACnC,WAAO,IAAK,MAAK,kBAAkB,KAAK,OAAO,QAAQ,SAAS;AAAA;AAAA,EAG3D,cAAc,GAAgB,QAAiD;AACpF,QAAI,MAAM,kBAAkB,GAAG,OAAO;AACtC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG
,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,kBAAkB,KAAK,OAAO;AACpC,WAAO,UAAU,KAAK,OAAO,OAAO,SAAS;AAAA;AAAA,EAGxC,aAAa,GAAgB,QAAsC;AACxE,QAAI,MAAM,KAAK,OAAO,qBAClB,MAAM,UAAU,GAAG,OAAO,OAAqB,SAAS,UACxD,wBAAuB,GAAG,OAAO;AACrC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,OAAO,QAAQ,wBAAuB,KAAK,OAAO,SAAS;AACjE,UAAM,OAAO,QAAQ,wBAAuB,KAAK,OAAO,SAAS;AACjE,WAAO,UAAU,KAAK,OAAO,OAAO,SAAS;AAAA;AAAA,EAGxC,aAAa,OAAiB,WAAgC;AACnE,UAAM,CAAE,UAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,UAAI,cAAc,AAAG,sBAAK,MAAM,cAAc,WAAW,QAAQ;AACjE,oBAAc,KAAK,OAAO,UACtB,UAAU,aAAa,KAAK,OAAO,WACnC;AACJ,oBAAc,YAAY,IAAI;AAC9B,aAAO,KAAK,OAAO,qBACf,KAAK,aAAa,aAAa,UAC/B,KAAK,cAAc,aAAa;AAAA;AAAA;AAAA,QAI3B,QAAQ,OAAkB,WAAyC;AAC9E,WAAO,KAAK,aAAa,MAAM,WAAW,QAAQ;AAAA;AAAA,QAGvC,OAAO,OAAkB,gBAAoC,IAAgC;AACxG,UAAM,CAAE,WAAW,kBAAmB,IAAI,kBAAkB;AAC5D,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa,UAAU;AAC9C,UAAM,OAAO,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,GAAG;AAC9C,UAAM,kBAAkB;AAAA,MACtB,OAAO,SAAS,cAAc;AAAA,MAC9B,QAAQ,SAAS,eAAe;AAAA;AAGlC,UAAM,UAAU,MAAM,KAAK,aAAa,MAAM,SAAS,2BAA2B,IAAI;AACtF,QAAI;AACJ,SAAK;AAEL,UAAM,QAAQ,QAAQ,IAAI,CAAC,QAAQ,IAAI;AACvC,UAAM,SAAS,QAAQ,IAAI,CAAC,QAAQ,IAAI;AACxC,UAAM,cAAc,QAAQ,IAAI,CAAC,QAAQ,IAAI;AAC7C,UAAM,aAAa,QAAQ,IAAI,CAAC,QAAQ,KAAK,OAAO,QAAQ,IAAI;AAEhE,UAAM,UAAU,kBACd,MAAM,IAAI,CAAC,QAAQ,IAAI,QAAQ,aAC/B,QACA,KAAK,OAAO,cACZ;AAGF,UAAM,aAAa,QAAQ,IAAI,CAAC,QAAQ,IAAI,gBAC1C,OAAO,MACP,YAAY,MACZ,WAAW,MACX,MAAM,MACN;AAEF,WAAO;AAAA;AAAA,EAGC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B,WAAW,KAAK;AAAA;AAAA,EAG1C,cAAc,SAAuB;AAC7C,UAAM,cAAc,KAAK,OAAO,eAAe,gBAAe;AAE9D,UAAM,aAAa,cAAc,YAAY,SAAS;AACtD,QAAI,eAAe,KAAK,eAAe,KAAK,eAAe,GAAG;AAC5D,YAAM,IAAI,MAAM,oEAAoE;AAAA;AAEtF,WAAO,eAAc,SAAS,KAAK,QAAQ,KAAK,iBAAiB;AAAA;AAAA,QAGnD,aACd,cACA,qBACA,gBACA;AACA,UAAM,CAAE,OAAO,UAAW;AAC1B,UAAM,YAAY,KAAK,IAAI,OAAO;AAClC,UAAM,oBAAoB,YAAY;AACtC,UAAM,oBAAoB,YAAY;AAEtC,UAAM,WAAW,aAAa,MAAM;AACpC,UAAM,WAAW,KAAK,OAAO,QAAQ;AAErC,UAAM,CAAC,aAAa,cAAc,qBAAqB,AAAG,sBAAK,MAAM;AACnE,YAAM,WAAW,aAAa,QAAQ,CAAC,UAAU,UAAU,UAAU,KAAK;AAE1E,YAAM,QAAQ,SAAS,MAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU;AAC1E,YAAM,SAAS,SAAS,MAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU;AAC3E,YAAM,cAAc,KAAK,kBACrB,AAAG,yBAAQ,SAAS,MAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU,KAAK,OAAO,QAAQ,UAAU,KACrG,AAAG,wBAAO;AACd,aAAO,CAAC,OAAO,QAAQ;AAAA;AAGzB,UAAM,UAAU;AAChB,UAAM,aAAa,MAAM,aAAa;AACtC,UAAM,YAAY,MAAM,YAAY;AACpC,aAAS,MAAM,GAAG,MAAM,UAAU,OAAO;AACvC,eAAS,MAAM,GAAG,MAAM,UAAU,OAAO;AACvC,iBAAS,SAAS,GAAG,SAAS,UAAU,UAAU;AAChD,gBAAM,QAAQ,QAAQ,WAAW,KAAK,KAAK,QAAQ;AACnD,cAAI,CAAC,kBAAkB,QAAQ,gBAAgB;AAC7C,kBAAM,MAAQ,OAAM,QAAQ,UAAU,KAAK,KAAK,QAAQ,OAAO,WAAY;AAC3E,kBAAM,MAAQ,OAAM,QAAQ,UAAU,KAAK,KAAK,QAAQ,OAAO,WAAY;AAC3E,kBAAM,aAAe,KAAK,IAAI,UAAU,KAAK,KAAK,QAAQ,MAAM,KAAK,OAAO,QAAQ,QAAQ,IAAK,WAAY;AAC7G,kBAAM,cAAgB,KAAK,IAAI,U
AAU,KAAK,KAAK,QAAQ,MAAM,KAAK,OAAO,QAAQ,QAAQ,IAAK,WAAY;AAC9G,kBAAM,IAAK,MAAO,aAAa;AAC/B,kBAAM,IAAK,MAAO,cAAc;AAChC,kBAAM,MAAM,CAAE,KAAK,KAAK;AACxB,kBAAM,CAAE,YAAY,SAAU,KAAK,kBAC/B,MAAM,KAAK,sBAAsB,mBAAkC,OACnE,CAAE,YAAY,GAAG,OAAO;AAC5B,oBAAQ,KAAK;AAAA,cACX,KAAK,IAAI,YAAY,GAAG,GAAG,IAAI,YAAY,IAAI;AAAA,cAC/C;AAAA,cACA,YAAY,QAAQ;AAAA,cACpB;AAAA,iBACG;AAAA;AAAA;AAAA;AAAA;AAAA;AAOb,gBAAY;AACZ,iBAAa;AACb,sBAAkB;AAClB,WAAO;AAAA;AAAA,QAGK,sBAAsB,eAA4B,KAAmD;AACjH,UAAM,CAAE,KAAK,KAAK,UAAW;AAC7B,UAAM,cAAc,MAAM,cAAc;AACxC,WAAO,MAAM,KAAK,OAAO,QAAQ,QAAQ,KAAK,GAC3C,IAAI,CAAC,GAAG,MAAM,YAAY,KAAK,KAAK,QAAQ,IAC5C,IAAI,CAAC,YAAY,UAAW;AAAA,MAC3B;AAAA,MACA;AAAA,QAED,OAAO,CAAC,KAAK,SAAU,IAAI,aAAa,KAAK,aAAa,MAAM;AAAA;AAAA;AA/MhE;AACS,AADT,eACS,uBAAuB,CAAC,GAAG,IAAI,IAAI,IAAI,KAAK,KAAK,KAAK,MAAM;;;ACPrE,+BAAyB,eAAe;AAAA,EAC7C,YAAY,qBAA8B,MAAM;AAC9C,UAAM,SAAS;AAAA,MACb;AAAA,MACA,cAAc;AAAA,MACd,SAAS,CAAC;AAAA,SACN,qBACA;AAAA,QACA,SAAS;AAAA,QACT,SAAS;AAAA,UAET;AAAA,QACA,SAAS;AAAA,QACT,iBAAiB;AAAA;AAAA;AAIvB,UAAM;AAAA;AAAA,MAGG,qBAA8B;AACvC,WAAO,KAAK,OAAO;AAAA;AAAA,MAGV,UAAmB;AAC5B,WAAO,KAAK,OAAO;AAAA;AAAA,QAGR,YAAY,OAAkB,eAA6D;AACtG,UAAM,mBAAmB,MAAM,KAAK,OAAO,OAAO;AAClD,WAAO,iBAAiB,IAAI,CAAC,QAAQ,IAAI,cAAc,IAAI,OAAO,IAAI,aAAa,CAAE,OAAO,IAAI,YAAY,QAAQ,IAAI;AAAA;AAAA,EAGhH,sBAA8B;AACtC,WAAO,KAAK,qBAAqB,oCAAoC;AAAA;AAAA,EAG7D,2BAA2B,WAA8F;AACjI,WAAO,MAAM,2BAA2B;AAAA;AAAA;;;AChDrC,0BAA0B,SAAuB,qBAA8B,MAAM;AAC1F,QAAM,MAAM,IAAI,WAAW;AAC3B,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACNF,4CAAsC,kBAAkB;AAAA,EAAxD,cAJP;AAIO;AACK,iBAAgB;AAAA;AAAA;;;ACLrB,2BAAwB;AAAA,QAEhB,KAAK,aAA2D;AAC3E,WAAO,YAAY,MAAM,KAAK;AAAA;AAAA,QAGnB,MAAkB;AAC7B,UAAM,IAAI,MAAM;AAAA;AAAA;;;ACApB,gDACE,eACA,OAEA,gBACA,gBAEA,sBAAwF,CAAC,CAAE,iBAAkB,aAC7G;AACA,QAAM,YAAY,cAAc,IAAI,CAAC,iBAAkB,oBAAoB,gBACvE,oBAAoB,gBACpB,aAAa;AACjB,QAAM,QAAgD,kBACpD,kBAAoB,0BAChB,MAAM,mBAAmB,OAAO,aAChC,MAAM,aAAa,OAAO;AAEhC,QAAM,UAAU,MAAM,eAAe;AACrC,QAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,SAAO;AAAA;AAGT,iDACE,cACA,OAEA,eACA,gBAEA,qBACA;AACA,SAAO,iCACL,CAAC,eACD,OACA,OAAO,UAAU,cAAc,MAAM,KACrC,gBACA;AAAA;;;ACzCG,IAAM,iBAAgB;AAEtB,IAAM,eAAc;AAAA,EACzB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA;AAGf,IAAM,WAAqC,CAAC,SAAS,SAAS;;;ACF9D,qCAA+B,eAAe;AAAA,EACnD,cAAc;AACZ,UAAM,SAAS;AAAA,MACb,oBAAoB;AAAA,MACpB,cAAc;AAAA,MACd,SAAS,CAAC;AAAA,MACV,SAAS;AAAA,MACT,SAAS;AAAA,MACT,oBAAoB;AAAA,MACpB,aAAa,CAAC,GAAG,IAAI,IAAI,IAAI,KAAK,KAAK;AAAA;AAGzC,UAAM;AAAA;AAAA,MAGG,UAAmB;AAC5B,WAAO,KAAK,OAAO;AAAA;AAAA,QAGR,YAAY,OAAkB,eAA6D;AACtG,UAAM,mBAAmB,MAAM,KAAK,OAAO,OAAO;AAClD,WAAO,iBAAiB,IAAI,CAAC,QAAQ,IAAI,cAAc,IAAI,OAAO,IAAI,aAAa,CAAE,OAAO,IAAI,YAAY,QAAQ,IAAI;AAAA;AAAA,EAGhH,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8F;AACjI,WAAO,MAAM,2BAA2B;AAAA;AAAA;;;ACvBrC,IAAM,OAAO;AAAA,EAClB,gBAAgB,IAAI;AAAA,EACpB,kBAAkB,IAAI;AAAA,EACtB,YAAY,IAAI;AAAA,EAChB,mBAAmB,IAAI;AAAA,EACvB,uBAAuB,IAAI;AAAA,EAC3B,oBAAoB,IAAI;AAAA,EACxB,mBAAmB,IAAI;AAAA,EACvB,cAAc,IAAI;AAAA;AAUb,IAAM,iBAAiB,CAAC,OAAkB,YAA6D,KAAK,eAAe,YAAY,OAAO;AAS9I,IAAM,mBAAmB,CAAC,OAAkB,YAA+D,KAAK,iBAAiB,YAAY,OAAO;AASpJ,IAAM,aAAa,CAAC,OAAkB,YAA0D,KAAK,WAAW,YAAY,OAAO;AASnI,IAAM,sBAAsB,CAAC,UAAmE,KAAK,kBAAkB,gBAAgB;AAWvI,IAAM,0BAA0B,CAAC,UAAmE,KAAK,sBAAsB,gBAAgB;AAY/I,IAAM,wBAAwB,CAAC,UAA6D,KAAK,mBAAmB,sBAAsB;AAS1I,IAAM,2BAA2B,CAAC,UAAmE,KAAK,kBAAkB,mBAAmB;AAS/I,IAAM,sBAAsB,CAAC,UAAiF,KAAK,aAAa,oBAAoB;AAEpJ,IAAM,0BAA0B,CAAC,QAAgB,KAAK,eAAe,KAAK;AAC1E,IAAM,4BAA4B,CAAC,QAAgB,KAAK,iBAAiB,KAAK;AAC9E,IAAM,sBAAsB,CAAC,QAAgB,KAAK,WAAW,KAAK;AAClE,IAAM,wBAAwB,CAAC,QAAgB,KAAK,kBAAkB,KAAK;AAC3E,IAAM
,4BAA4B,CAAC,QAAgB,KAAK,sBAAsB,KAAK;AACnF,IAAM,2BAA2B,CAAC,QAAgB,KAAK,mBAAmB,KAAK;AAC/E,IAAM,0BAA0B,CAAC,QAAgB,KAAK,kBAAkB,KAAK;AAC7E,IAAM,qBAAqB,CAAC,QAAgB,KAAK,aAAa,KAAK;AAGnE,IAAM,yBAAyB;AAC/B,IAAM,cAAc;AACpB,IAAM,kBAAkB;;;ACtGxB,mDAAqE,eAAwB;AAAA,EAClG,YAEY,YAEA,OAEA,gBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA;AAMP,kDAAmF,+BAA0E;AAAA,QACrJ,MAA+C;AAC1D,UAAM,gBAAgB,MAAM,KAAK;AAEjC,UAAM,wBAAwB,MAAM,iCAClC,eACA,KAAK,OACL,OAAO,UAAU,QAAQ,IACvB,MAAM,IAAI,CAAC,SAAS,KAAK,kBAAkB,mBAAmB,SAEhE,KAAK;AAGP,WAAO,cAAc,IACnB,CAAC,cAAc,MAAM,0BAAmC,cAAc,sBAAsB;AAAA;AAAA,EAIhG,mBAAmB;AACjB,WAAO,IAAI,2BAA2B,MAAM,KAAK;AAAA;AAAA;AAI9C,qDAAsF,+BAA8F;AAAA,QAC5K,MAAyD;AACpE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAGT,UAAM,kBAAkB,MAAM,kCAC5B,cACA,KAAK,OACL,CAAC,SAAS,KAAK,kBAAkB,mBAAmB,OACpD,KAAK;AAGP,WAAO,0BAA0B,cAAc;AAAA;AAAA,EAGjD,mBAAmB;AACjB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,mEAAuH,8BAAuC;AAAA,EACnK,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA,EAGpE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,sEAA0H,iCAA0C;AAAA,EACzK,mBAAmB;AACjB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACzEnD,gDAAkE,eAAwB;AAAA,EAC/F,YAEY,YAEA,OAEA,gBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA;AAMP,+CAAgF,4BAAuE;AAAA,QAC/I,MAA+C;AAC1D,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,qBAAqB,MAAM,iCAC/B,eACA,KAAK,OACL,OAAO,UAAU,QAAQ,IAAI,MAAM,IAAI,CAAC,SAAS,KAAK,aAAa,oBAAoB,SACvF,KAAK;AAEP,WAAO,cAAc,IAAI,CAAC,cAAc,MAAM;AAC5C,YAAM,CAAE,KAAK,QAAQ,qBAAsB,mBAAmB;AAC9D,aAAO,cAAc,iBAAiB,cAAc,QAAQ,oBAAoB;AAAA;AAAA;AAAA,EAIpF,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,kDAAmF,4BAA2F;AAAA,QACtK,MAAyD;AACpE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC;AAAc,aAAO;AAC1B,UAAM,CAAE,KAAK,QAAQ,qBAAsB,MAAM,kCAC/C,cACA,KAAK,OACL,CAAC,SAAS,KAAK,aAAa,oBAAoB,OAChD,KAAK;AAEP,WAAO,cAAc,iBAAiB,cAAc,QAAQ,oBAAoB;AAAA;AAAA,EAGlF,sBAAsB;AACpB,WAAO,IAAI,iCAAiC,MAAM,KAAK;AAAA;AAAA;AAIpD,gEAAoH,2BAAoC;AAAA,EAC7J,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,mEAAuH,8BAAuC;AAAA,EACnK,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACvEnD,mDAAqE,eAAwB;AAAA,EAClG,YAEY,YAEA,OACV;AACA;AAJU;AAEA;AAAA;AAAA;AAMP,kDAAsG,+BAAyE;AAAA,QACvK,MAA8C;AACzD,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,cAAc,MAAM,iCACxB,eACA,KAAK,OACL,CAAC,UAAU,QAAQ,IAAI,MAAM,IAAI,CAAC,SAAS,KAAK,mBAAmB,sBAAsB,SACzF,MACA,CAAC,iBAAiB,aAAa,UAAU,MAAM,MAAM,CAAE,kBAAkB;AAE3E,WAAO,YAAY,IAAI,CAAC,YAAY,MAAM,yBAAkC,cAAc,IAAI;AAAA;AAAA,EAGhG,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA;AAI/D,oDAAwG,+BAA6F;AAAA,QAC7L,MAAwD;AACnE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAET,UAAM,aAAa,MAAM,kCACvB,cACA,KAAK,OACL,CAAC,SAAS,KAAK,mBAAmB,sBAAsB,OACxD,MAEA,CAAC,kBAAiB,cAAa,UAAU,MAAM,MAAM,CAAE,kBAAkB;AAG3E,WAAO,yBAAyB,cAAc;AAAA;AAAA,EAGhD,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,mBAAmB;AACjB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA;;;ACpDlE,gDAAkE,eAAwB;AAAA,EAC/F,YAEY,YAEA,OAEA,oBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA,MAKE,cAAyD;AACrE,WAAO,KAAK,qBACR,KAAK,wBACL,KAAK;AAAA;AAAA;AAIN,+CAAgF,4BAAqE;AAAA,QAC7I,MAA6C;AACxD,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,aAAa,cAAc,IAAI,CAAC,QAAQ,IAAI;AAClD,UAAM,QAAgD,KAAK,iBAAoB,0BAC3E,MAAM,mBAAmB,KAAK,OAAO,cACrC,MAAM,aAAa,KAAK,OAAO;AACnC,UAAM,sBAAsB,MAAM,QAAQ,IACxC,MAAM,IAAI,CAAC,SAAS,KAAK,YAAY,gBAAgB;AAEvD,UAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,WAAO,cAAc,IAAI,CAAC,cAAc,MAAM,wBAAiC,cAAc,oBAAoB;AAAA;AAAA,EAGnH,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,
MAAM,KAAK;AAAA;AAAA,EAGvE,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA,EAGpE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,kDAAmF,4BAAyF;AAAA,QACpK,MAAuD;AAClE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAET,UAAM,CAAE,aAAc;AACtB,UAAM,QAAgD,KAAK,iBAAoB,0BAC3E,MAAM,mBAAmB,KAAK,OAAO,CAAC,cACtC,MAAM,aAAa,KAAK,OAAO,CAAC;AACpC,UAAM,YAAY,MAAM,KAAK,YAAY,gBAAgB,MAAM;AAC/D,UAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,WAAO,wBAAiC,cAAc;AAAA;AAAA,EAGxD,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,mBAAmB;AACjB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACvEnD,wCAA2C,eAAwB;AAAA,EAExE,YAAsB,OAA4B,UAAgC,IAAI,yBAAyB;AAC7G;AADoB;AAA4B;AAAA;AAAA;AAK7C,uCAAiC,oBAAqC;AAAA,QAC9D,MAAgC;AAC3C,UAAM,CAAE,OAAO,WAAY;AAC3B,QAAI;AACJ,QAAI,mBAAmB;AAAyB,eAAS,KAAK,iBAAiB,YAAY,OAAO;AAAA,aACzF,mBAAmB;AAAuB,eAAS,KAAK,eAAe,YAAY,OAAO;AAAA,aAC1F,mBAAmB;AAAmB,eAAS,KAAK,WAAW,YAAY,OAAO;AAAA;AACtF,YAAM,IAAI,MAAM;AACrB,WAAO;AAAA;AAAA,EAGD,iCAAmE;AACzE,WAAO,IAAI,QAAiC,CAAC,SAAS,WAAW;AAC/D,WAAK,MACF,KAAK,CAAC,eAAe,QAAQ,WAAW,IAAI,CAAC,cAAc,wBAAwB,IAAI,cACvF,MAAM,CAAC,QAAQ,OAAO;AAAA;AAAA;AAAA,EAI7B,kBAAkB,qBAA8B,OAAO;AACrD,WAAO,IAAI,2BACT,KAAK,kCACL,KAAK,OACL;AAAA;AAAA,EAIJ,sBAAsB;AACpB,WAAO,IAAI,8BACT,KAAK,kCACL,KAAK;AAAA;AAAA,EAIT,mBAAmB;AACjB,WAAO,IAAI,2BACT,KAAK,kCACL,KAAK;AAAA;AAAA;AAKJ,yCAAmC,oBAA+C;AAAA,QAC1E,MAA0C;AACrD,UAAM,iBAAiB,MAAM,IAAI,mBAAmB,KAAK,OAAO,KAAK;AACrE,QAAI,gCAAgC,eAAe;AACnD,mBAAe,QAAQ,CAAC,kBAAkB;AACxC,UAAI,cAAc,QAAQ,8BAA8B;AAAO,wCAAgC;AAAA;AAEjG,WAAO;AAAA;AAAA,EAGD,gCAA4E;AAElF,WAAO,IAAI,QAA2C,OAAO,YAAY;AACvE,YAAM,YAAY,MAAM,KAAK;AAC7B,cAAQ,YAAY,wBAA4B,IAAI,aAAa;AAAA;AAAA;AAAA,EAIrE,kBAAkB,qBAA8B,OAAO;AACrD,WAAO,IAAI,8BACT,KAAK,iCACL,KAAK,OACL;AAAA;AAAA,EAIJ,sBAAsB;AACpB,WAAO,IAAI,iCACT,KAAK,iCACL,KAAK;AAAA;AAAA,EAIT,mBAAmB;AACjB,WAAO,IAAI,8BACT,KAAK,iCACL,KAAK;AAAA;AAAA;;;AC9FJ,0BAA0B,OAAkB,UAAgC,IAAI,yBAA+C;AACpI,SAAO,IAAI,qBAAqB,OAAO;AAAA;AAGlC,wBAAwB,OAAkB,UAAgC,IAAI,yBAA6C;AAChI,SAAO,IAAI,mBAAmB,OAAO;AAAA;;;ACJvC,sCAA6C,OAAkB,eAAiG;AAC9J,SAAO,eAAe,OAAO,IAAI,sBAAsB,gBAAgB,CAAE,iBAAkB,KACxF,oBACA;AAAA;AAGL,kCAAyC,OAAkB,gBAAoC,IAA6E;AAC1K,SAAO,eAAe,OAAO,IAAI,kBAAkB,gBAChD,oBACA;AAAA;AAGE,IAAM,WAAW;;;AClBjB,2BAA2B,MAA+B,MAA+B;AAC9F,MAAI,KAAK,WAAW,KAAK;AAAQ,UAAM,IAAI,MAAM;AAEjD,QAAM,QAAQ,MAAM,KAAK;AACzB,QAAM,QAAQ,MAAM,KAAK;AAEzB,SAAO,KAAK,KACV,MACG,IAAI,CAAC,KAAK,MAAM,MAAM,MAAM,IAC5B,OAAO,CAAC,KAAK,SAAS,MAAO,QAAQ,GAAI;AAAA;;;ACJzC,wBAAkB;AAAA,EAKvB,YACE,QACA,oBAA4B,KAC5B;AACA,SAAK,qBAAqB;AAE1B,UAAM,aAAa,MAAM,QAAQ,UAAU,SAAS,CAAC;AAErD,QAAI,CAAC,WAAW,QAAQ;AACtB,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,QAAQ;AACZ,UAAM,oBAAoB,MAAM,UAAU;AAE1C,SAAK,sBAAsB,WAAW,IAAI,CAAC,SAAS;AAClD,UAAI,gBAAgB,wBAAwB;AAC1C,eAAO;AAAA;AAGT,UAAI,gBAAgB,cAAc;AAChC,eAAO,IAAI,uBAAuB,qBAAqB,CAAC;AAAA;AAG1D,UAAI,KAAK,cAAc,KAAK,sBAAsB,cAAc;AAC9D,eAAO,IAAI,uBAAuB,qBAAqB,CAAC,KAAK;AAAA;AAG/D,YAAM,IAAI,MAAM;AAAA;AAAA;AAAA,MAIT,qBAA+C;AAAE,WAAO,KAAK;AAAA;AAAA,MAE7D,oBAA4B;AAAE,WAAO,KAAK;AAAA;AAAA,EAE9C,oBAAoB,iBAA+B,aAAqC;AAC7F,WAAO,YACJ,IAAI,CAAC,MAAM,kBAAkB,GAAG,kBAChC,OAAO,CAAC,IAAI,OAAO,KAAK,IAAI,KACxB,aAAY,UAAU;AAAA;AAAA,EAGxB,gBAAgB,iBAA0C;AAC/D,WAAO,KAAK,mBACT,IAAI,CAAC,CAAE,aAAa,WAAY,IAAI,UACnC,OACA,KAAK,oBAAoB,iBAAiB,eAE3C,OAAO,CAAC,MAAM,SAAU,KAAK,WAAW,KAAK,WAAW,OAAO;AAAA;AAAA,EAG7D,cAAc,iBAA0C;AAC7D,UAAM,YAAY,KAAK,gBAAgB;AACvC,WAAO,UAAU,WAAW,KAAK,oBAC7B,YACA,IAAI,UAAU,WAAW,UAAU;AAAA;AAAA,EAGlC,SAAc;AACnB,WAAO;AAAA,MACL,mBAAmB,KAAK;AAAA,MACxB,oBAAoB,KAAK,mBAAmB,IAAI,CAAC,OAAO,GAAG;AAAA;AAAA;AAAA,SAIjD,SAAS,MAAwB;AAC7C,UAAM,qBAAqB,KAAK,mBAC7B,IAAI,CAAC,OAAY,uBAAuB
,SAAS;AACpD,WAAO,IAAI,YAAY,oBAAoB,KAAK;AAAA;AAAA;;;AC1E7C,gCAAgC,SAAuB;AAC5D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACFF,uBAA0B,SAAY,YAA4B;AACvE,QAAM,CAAE,OAAO,UAAW,IAAI,WAAW,WAAW,OAAO,WAAW;AAEtE,MAAI,SAAS,KAAK,UAAU,GAAG;AAC7B,UAAM,IAAI,MAAM,uCAAuC,KAAK,UAAU,CAAE,OAAO;AAAA;AAGjF,MAAI,MAAM,QAAQ,UAAU;AAE1B,WAAQ,QAAuB,IAAI,CAAC,QAAQ,cAAc,KAAK,CAAE,OAAO;AAAA;AAG1E,MAAI,oBAAoB,UAAU;AAChC,UAAM,mBAAmB,QAAQ,UAAU,QAAQ,OAAO;AAC1D,UAAM,mBAAmB,QAAQ,mBAAmB,QAAQ,iBAAiB,IAAI,OAAO,iBAAiB,IAAI;AAC7G,WAAO,wBAAwB,wBAAwB,SAAS,mBAAmB;AAAA;AAGrF,MAAI,oBAAoB,UAAU;AAChC,WAAO,wBAAwB,SAAS,QAAQ,UAAU,QAAQ,OAAO;AAAA;AAG3E,MAAI,mBAAmB,iBAAiB,mBAAmB,eAAe;AACxE,WAAQ,QAAgB,QAAQ,OAAO;AAAA;AAGzC,SAAO;AAAA;;;ACRT,IAAM,OAAQ,OAAO,YAAY;AACjC,IAAM,WAAW,OAAO,cAAc,eAAiB,OAAO,UAAU,cAAc;AAC/E,IAAM,WAAU,CAAE,SAAa,SAAmB,MAAM;", + "sources": ["../src/tfjs/tf-browser.ts", "../src/draw/index.ts", "../src/draw/drawContour.ts", "../src/utils/index.ts", "../src/classes/Dimensions.ts", "../src/classes/Point.ts", "../src/classes/Box.ts", "../src/classes/BoundingBox.ts", "../src/classes/ObjectDetection.ts", "../src/classes/FaceDetection.ts", "../src/ops/iou.ts", "../src/ops/minBbox.ts", "../src/ops/nonMaxSuppression.ts", "../src/ops/normalize.ts", "../src/ops/padToSquare.ts", "../src/ops/shuffleArray.ts", "../src/ops/index.ts", "../src/classes/Rect.ts", "../src/classes/FaceLandmarks.ts", "../src/classes/FaceLandmarks5.ts", "../src/classes/FaceLandmarks68.ts", "../src/classes/FaceMatch.ts", "../src/classes/LabeledBox.ts", "../src/classes/LabeledFaceDescriptors.ts", "../src/classes/PredictedBox.ts", "../src/factories/WithFaceDetection.ts", "../src/env/createBrowserEnv.ts", "../src/env/createFileSystem.ts", "../src/env/createNodejsEnv.ts", "../src/env/isBrowser.ts", "../src/env/isNodejs.ts", "../src/env/index.ts", "../src/dom/resolveInput.ts", "../src/dom/getContext2dOrThrow.ts", "../src/draw/DrawTextField.ts", "../src/draw/DrawBox.ts", "../src/draw/drawDetections.ts", "../src/dom/isMediaLoaded.ts", "../src/dom/awaitMediaLoaded.ts", "../src/dom/bufferToImage.ts", "../src/dom/getMediaDimensions.ts", "../src/dom/createCanvas.ts", "../src/dom/imageTensorToCanvas.ts", "../src/dom/isMediaElement.ts", "../src/dom/imageToSquare.ts", "../src/dom/NetInput.ts", "../src/dom/toNetInput.ts", "../src/dom/extractFaces.ts", "../src/dom/extractFaceTensors.ts", "../src/dom/fetchOrThrow.ts", "../src/dom/fetchImage.ts", "../src/dom/fetchJson.ts", "../src/dom/fetchNetWeights.ts", "../src/dom/bufferToVideo.ts", "../src/dom/fetchVideo.ts", "../src/common/getModelUris.ts", "../src/dom/loadWeightMap.ts", "../src/dom/matchDimensions.ts", "../src/NeuralNetwork.ts", "../src/common/depthwiseSeparableConv.ts", "../src/faceFeatureExtractor/denseBlock.ts", "../src/common/convLayer.ts", "../src/common/disposeUnusedWeightTensors.ts", "../src/common/extractConvParamsFactory.ts", "../src/common/extractFCParamsFactory.ts", "../src/common/types.ts", "../src/common/extractSeparableConvParamsFactory.ts", "../src/common/extractWeightEntryFactory.ts", "../src/common/extractWeightsFactory.ts", "../src/faceFeatureExtractor/extractorsFactory.ts", "../src/faceFeatureExtractor/extractParams.ts", "../src/common/loadConvParamsFactory.ts", "../src/faceFeatureExtractor/loadParamsFactory.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMap.ts", "../src/faceFeatureExtractor/FaceFeatureExtractor.ts", "../src/common/fullyConnectedLayer.ts", "../src/faceProcessor/extractParams.ts", "../src/faceProcessor/extractParamsFromWeightMap.ts", "../src/faceProcessor/util.ts", 
"../src/faceProcessor/FaceProcessor.ts", "../src/faceExpressionNet/FaceExpressions.ts", "../src/faceExpressionNet/FaceExpressionNet.ts", "../src/factories/WithFaceExpressions.ts", "../src/draw/drawFaceExpressions.ts", "../src/factories/WithFaceLandmarks.ts", "../src/draw/DrawFaceLandmarks.ts", "../src/xception/extractParams.ts", "../src/xception/extractParamsFromWeightMap.ts", "../src/xception/TinyXception.ts", "../src/ageGenderNet/extractParams.ts", "../src/ageGenderNet/extractParamsFromWeightMap.ts", "../src/ageGenderNet/types.ts", "../src/ageGenderNet/AgeGenderNet.ts", "../src/faceLandmarkNet/FaceLandmark68NetBase.ts", "../src/faceLandmarkNet/FaceLandmark68Net.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMapTiny.ts", "../src/faceFeatureExtractor/extractParamsTiny.ts", "../src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "../src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "../src/faceLandmarkNet/index.ts", "../src/faceRecognitionNet/scaleLayer.ts", "../src/faceRecognitionNet/convLayer.ts", "../src/faceRecognitionNet/extractParams.ts", "../src/faceRecognitionNet/extractParamsFromWeightMap.ts", "../src/faceRecognitionNet/residualLayer.ts", "../src/faceRecognitionNet/FaceRecognitionNet.ts", "../src/faceRecognitionNet/index.ts", "../src/factories/WithFaceDescriptor.ts", "../src/factories/WithAge.ts", "../src/factories/WithGender.ts", "../src/ssdMobilenetv1/extractParams.ts", "../src/ssdMobilenetv1/extractParamsFromWeightMap.ts", "../src/ssdMobilenetv1/pointwiseConvLayer.ts", "../src/ssdMobilenetv1/mobileNetV1.ts", "../src/ssdMobilenetv1/nonMaxSuppression.ts", "../src/ssdMobilenetv1/outputLayer.ts", "../src/ssdMobilenetv1/boxPredictionLayer.ts", "../src/ssdMobilenetv1/predictionLayer.ts", "../src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "../src/ssdMobilenetv1/SsdMobilenetv1.ts", "../src/ssdMobilenetv1/index.ts", "../src/tinyYolov2/const.ts", "../src/tinyYolov2/config.ts", "../src/tinyYolov2/leaky.ts", "../src/tinyYolov2/convWithBatchNorm.ts", "../src/tinyYolov2/depthwiseSeparableConv.ts", "../src/tinyYolov2/extractParams.ts", "../src/tinyYolov2/extractParamsFromWeightMap.ts", "../src/tinyYolov2/TinyYolov2Options.ts", "../src/tinyYolov2/TinyYolov2Base.ts", "../src/tinyYolov2/TinyYolov2.ts", "../src/tinyYolov2/index.ts", "../src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "../src/globalApi/ComposableTask.ts", "../src/globalApi/extractFacesAndComputeResults.ts", "../src/tinyFaceDetector/const.ts", "../src/tinyFaceDetector/TinyFaceDetector.ts", "../src/globalApi/nets.ts", "../src/globalApi/PredictFaceExpressionsTask.ts", "../src/globalApi/PredictAgeAndGenderTask.ts", "../src/globalApi/ComputeFaceDescriptorsTasks.ts", "../src/globalApi/DetectFaceLandmarksTasks.ts", "../src/globalApi/DetectFacesTasks.ts", "../src/globalApi/detectFaces.ts", "../src/globalApi/allFaces.ts", "../src/euclideanDistance.ts", "../src/globalApi/FaceMatcher.ts", "../src/tinyFaceDetector/index.ts", "../src/resizeResults.ts", "../src/index.ts"], + "sourcesContent": ["/* eslint-disable import/no-extraneous-dependencies */\n/* eslint-disable node/no-unpublished-import */\n\n// wrapper to load tfjs in a single place so version can be changed quickly\n\nexport * from '@tensorflow/tfjs/dist/index.js';\nexport * from '@tensorflow/tfjs-backend-wasm';\n", "export * from './drawContour';\nexport * from './drawDetections';\nexport * from './drawFaceExpressions';\nexport * from './DrawBox';\nexport * from './DrawFaceLandmarks';\nexport * from './DrawTextField';\n", "import { Point } from '../classes/index';\n\nexport 
function drawContour(\n ctx: CanvasRenderingContext2D,\n points: Point[],\n isClosed: boolean = false,\n) {\n ctx.beginPath();\n\n points.slice(1).forEach(({ x, y }, prevIdx) => {\n const from = points[prevIdx];\n ctx.moveTo(from.x, from.y);\n ctx.lineTo(x, y);\n });\n\n if (isClosed) {\n const from = points[points.length - 1];\n const to = points[0];\n if (!from || !to) {\n return;\n }\n\n ctx.moveTo(from.x, from.y);\n ctx.lineTo(to.x, to.y);\n }\n\n ctx.stroke();\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Point } from '../classes/index';\nimport { Dimensions, IDimensions } from '../classes/Dimensions';\n\nexport function isTensor(tensor: any, dim: number) {\n return tensor instanceof tf.Tensor && tensor.shape.length === dim;\n}\n\nexport function isTensor1D(tensor: any): tensor is tf.Tensor1D {\n return isTensor(tensor, 1);\n}\n\nexport function isTensor2D(tensor: any): tensor is tf.Tensor2D {\n return isTensor(tensor, 2);\n}\n\nexport function isTensor3D(tensor: any): tensor is tf.Tensor3D {\n return isTensor(tensor, 3);\n}\n\nexport function isTensor4D(tensor: any): tensor is tf.Tensor4D {\n return isTensor(tensor, 4);\n}\n\nexport function isFloat(num: number) {\n return num % 1 !== 0;\n}\n\nexport function isEven(num: number) {\n return num % 2 === 0;\n}\n\nexport function round(num: number, prec: number = 2) {\n const f = 10 ** prec;\n return Math.floor(num * f) / f;\n}\n\nexport function isDimensions(obj: any): boolean {\n return obj && obj.width && obj.height;\n}\n\nexport function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number) {\n const scale = inputSize / Math.max(height, width);\n return new Dimensions(Math.round(width * scale), Math.round(height * scale));\n}\n\nexport function getCenterPoint(pts: Point[]): Point {\n return pts.reduce((sum, pt) => sum.add(pt), new Point(0, 0))\n .div(new Point(pts.length, pts.length));\n}\n\nexport function range(num: number, start: number, step: number): number[] {\n return Array(num).fill(0).map((_, i) => start + (i * step));\n}\n\nexport function isValidNumber(num: any) {\n return !!num && (num !== Infinity) && (num !== -Infinity) && !Number.isNaN(num) || num === 0;\n}\n\nexport function isValidProbablitiy(num: any) {\n return isValidNumber(num) && num >= 0 && num <= 1.0;\n}\n", "import { isValidNumber } from '../utils/index';\n\nexport interface IDimensions {\n width: number\n height: number\n}\n\nexport class Dimensions implements IDimensions {\n private _width: number\n\n private _height: number\n\n constructor(width: number, height: number) {\n if (!isValidNumber(width) || !isValidNumber(height)) {\n throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({ width, height })}`);\n }\n\n this._width = width;\n this._height = height;\n }\n\n public get width(): number { return this._width; }\n\n public get height(): number { return this._height; }\n\n public reverse(): Dimensions {\n return new Dimensions(1 / this.width, 1 / this.height);\n }\n}\n", "export interface IPoint {\n x: number\n y: number\n}\n\nexport class Point implements IPoint {\n private _x: number\n\n private _y: number\n\n constructor(x: number, y: number) {\n this._x = x;\n this._y = y;\n }\n\n get x(): number { return this._x; }\n\n get y(): number { return this._y; }\n\n public add(pt: IPoint): Point {\n return new Point(this.x + pt.x, this.y + pt.y);\n }\n\n public sub(pt: IPoint): Point {\n return new Point(this.x - pt.x, this.y - pt.y);\n }\n\n public 
mul(pt: IPoint): Point {\n return new Point(this.x * pt.x, this.y * pt.y);\n }\n\n public div(pt: IPoint): Point {\n return new Point(this.x / pt.x, this.y / pt.y);\n }\n\n public abs(): Point {\n return new Point(Math.abs(this.x), Math.abs(this.y));\n }\n\n public magnitude(): number {\n return Math.sqrt((this.x ** 2) + (this.y ** 2));\n }\n\n public floor(): Point {\n return new Point(Math.floor(this.x), Math.floor(this.y));\n }\n}\n", "import { isDimensions, isValidNumber } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { IDimensions } from './Dimensions';\nimport { Point } from './Point';\nimport { IRect } from './Rect';\n\nexport class Box implements IBoundingBox, IRect {\n public static isRect(rect: any): boolean {\n return !!rect && [rect.x, rect.y, rect.width, rect.height].every(isValidNumber);\n }\n\n public static assertIsValidBox(box: any, callee: string, allowNegativeDimensions: boolean = false) {\n if (!Box.isRect(box)) {\n throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`);\n }\n\n if (!allowNegativeDimensions && (box.width < 0 || box.height < 0)) {\n throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`);\n }\n }\n\n private _x: number\n\n private _y: number\n\n private _width: number\n\n private _height: number\n\n constructor(_box: IBoundingBox | IRect, allowNegativeDimensions: boolean = true) {\n const box = (_box || {}) as any;\n\n const isBbox = [box.left, box.top, box.right, box.bottom].every(isValidNumber);\n const isRect = [box.x, box.y, box.width, box.height].every(isValidNumber);\n\n if (!isRect && !isBbox) {\n throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`);\n }\n\n const [x, y, width, height] = isRect\n ? 
[box.x, box.y, box.width, box.height]\n : [box.left, box.top, box.right - box.left, box.bottom - box.top];\n\n Box.assertIsValidBox({\n x, y, width, height,\n }, 'Box.constructor', allowNegativeDimensions);\n\n this._x = x;\n this._y = y;\n this._width = width;\n this._height = height;\n }\n\n public get x(): number { return this._x; }\n\n public get y(): number { return this._y; }\n\n public get width(): number { return this._width; }\n\n public get height(): number { return this._height; }\n\n public get left(): number { return this.x; }\n\n public get top(): number { return this.y; }\n\n public get right(): number { return this.x + this.width; }\n\n public get bottom(): number { return this.y + this.height; }\n\n public get area(): number { return this.width * this.height; }\n\n public get topLeft(): Point { return new Point(this.left, this.top); }\n\n public get topRight(): Point { return new Point(this.right, this.top); }\n\n public get bottomLeft(): Point { return new Point(this.left, this.bottom); }\n\n public get bottomRight(): Point { return new Point(this.right, this.bottom); }\n\n public round(): Box {\n const [x, y, width, height] = [this.x, this.y, this.width, this.height]\n .map((val) => Math.round(val));\n return new Box({\n x, y, width, height,\n });\n }\n\n public floor(): Box {\n const [x, y, width, height] = [this.x, this.y, this.width, this.height]\n .map((val) => Math.floor(val));\n return new Box({\n x, y, width, height,\n });\n }\n\n public toSquare(): Box {\n let {\n x, y, width, height,\n } = this;\n const diff = Math.abs(width - height);\n if (width < height) {\n x -= (diff / 2);\n width += diff;\n }\n if (height < width) {\n y -= (diff / 2);\n height += diff;\n }\n\n return new Box({ x, y, width, height });\n }\n\n public rescale(s: IDimensions | number): Box {\n const scaleX = isDimensions(s) ? (s as IDimensions).width : s as number;\n const scaleY = isDimensions(s) ? 
(s as IDimensions).height : s as number;\n return new Box({\n x: this.x * scaleX,\n y: this.y * scaleY,\n width: this.width * scaleX,\n height: this.height * scaleY,\n });\n }\n\n public pad(padX: number, padY: number): Box {\n const [x, y, width, height] = [\n this.x - (padX / 2),\n this.y - (padY / 2),\n this.width + padX,\n this.height + padY,\n ];\n return new Box({\n x, y, width, height,\n });\n }\n\n public clipAtImageBorders(imgWidth: number, imgHeight: number): Box {\n const { x, y, right, bottom } = this;\n const clippedX = Math.max(x, 0);\n const clippedY = Math.max(y, 0);\n\n const newWidth = right - clippedX;\n const newHeight = bottom - clippedY;\n const clippedWidth = Math.min(newWidth, imgWidth - clippedX);\n const clippedHeight = Math.min(newHeight, imgHeight - clippedY);\n\n return (new Box({\n x: clippedX, y: clippedY, width: clippedWidth, height: clippedHeight,\n })).floor();\n }\n\n public shift(sx: number, sy: number): Box {\n const { width, height } = this;\n const x = this.x + sx;\n const y = this.y + sy;\n\n return new Box({\n x, y, width, height,\n });\n }\n\n public padAtBorders(imageHeight: number, imageWidth: number) {\n const w = this.width + 1;\n const h = this.height + 1;\n\n const dx = 1;\n const dy = 1;\n let edx = w;\n let edy = h;\n\n let x = this.left;\n let y = this.top;\n let ex = this.right;\n let ey = this.bottom;\n\n if (ex > imageWidth) {\n edx = -ex + imageWidth + w;\n ex = imageWidth;\n }\n if (ey > imageHeight) {\n edy = -ey + imageHeight + h;\n ey = imageHeight;\n }\n if (x < 1) {\n edy = 2 - x;\n x = 1;\n }\n if (y < 1) {\n edy = 2 - y;\n y = 1;\n }\n\n return {\n dy, edy, dx, edx, y, ey, x, ex, w, h,\n };\n }\n\n public calibrate(region: Box) {\n return new Box({\n left: this.left + (region.left * this.width),\n top: this.top + (region.top * this.height),\n right: this.right + (region.right * this.width),\n bottom: this.bottom + (region.bottom * this.height),\n }).toSquare().round();\n }\n}\n", "import { Box } from './Box';\n\nexport interface IBoundingBox {\n left: number\n top: number\n right: number\n bottom: number\n}\n\nexport class BoundingBox extends Box implements IBoundingBox {\n constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions: boolean = false) {\n super({\n left, top, right, bottom,\n }, allowNegativeDimensions);\n }\n}\n", "import { Box } from './Box';\nimport { Dimensions, IDimensions } from './Dimensions';\nimport { IRect, Rect } from './Rect';\n\nexport class ObjectDetection {\n private _score: number\n\n private _classScore: number\n\n private _className: string\n\n private _box: Rect\n\n private _imageDims: Dimensions\n\n constructor(\n score: number,\n classScore: number,\n className: string,\n relativeBox: IRect,\n imageDims: IDimensions,\n ) {\n this._imageDims = new Dimensions(imageDims.width, imageDims.height);\n this._score = score;\n this._classScore = classScore;\n this._className = className;\n this._box = new Box(relativeBox).rescale(this._imageDims);\n }\n\n public get score(): number { return this._score; }\n\n public get classScore(): number { return this._classScore; }\n\n public get className(): string { return this._className; }\n\n public get box(): Box { return this._box; }\n\n public get imageDims(): Dimensions { return this._imageDims; }\n\n public get imageWidth(): number { return this.imageDims.width; }\n\n public get imageHeight(): number { return this.imageDims.height; }\n\n public get relativeBox(): Box { return new 
Box(this._box).rescale(this.imageDims.reverse()); }\n\n public forSize(width: number, height: number): ObjectDetection {\n return new ObjectDetection(\n this.score,\n this.classScore,\n this.className,\n this.relativeBox,\n { width, height },\n );\n }\n}\n", "import { Box } from './Box';\nimport { IDimensions } from './Dimensions';\nimport { ObjectDetection } from './ObjectDetection';\nimport { Rect } from './Rect';\n\nexport interface IFaceDetecion {\n score: number\n box: Box\n}\n\nexport class FaceDetection extends ObjectDetection implements IFaceDetecion {\n constructor(\n score: number,\n relativeBox: Rect,\n imageDims: IDimensions,\n ) {\n super(score, score, '', relativeBox, imageDims);\n }\n\n public forSize(width: number, height: number): FaceDetection {\n const { score, relativeBox, imageDims } = super.forSize(width, height);\n return new FaceDetection(score, relativeBox, imageDims);\n }\n}\n", "import { Box } from '../classes/Box';\n\nexport function iou(box1: Box, box2: Box, isIOU: boolean = true) {\n const width = Math.max(0.0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left));\n const height = Math.max(0.0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top));\n const interSection = width * height;\n\n return isIOU\n ? interSection / (box1.area + box2.area - interSection)\n : interSection / Math.min(box1.area, box2.area);\n}\n", "import { BoundingBox, IPoint } from '../classes/index';\n\nexport function minBbox(pts: IPoint[]): BoundingBox {\n const xs = pts.map((pt) => pt.x);\n const ys = pts.map((pt) => pt.y);\n const minX = xs.reduce((min, x) => (x < min ? x : min), Infinity);\n const minY = ys.reduce((min, y) => (y < min ? y : min), Infinity);\n const maxX = xs.reduce((max, x) => (max < x ? x : max), 0);\n const maxY = ys.reduce((max, y) => (max < y ? 
y : max), 0);\n\n return new BoundingBox(minX, minY, maxX, maxY);\n}\n", "import { Box } from '../classes/Box';\nimport { iou } from './iou';\n\nexport function nonMaxSuppression(\n boxes: Box[],\n scores: number[],\n iouThreshold: number,\n isIOU: boolean = true,\n): number[] {\n let indicesSortedByScore = scores\n .map((score, boxIndex) => ({ score, boxIndex }))\n .sort((c1, c2) => c1.score - c2.score)\n .map((c) => c.boxIndex);\n\n const pick: number[] = [];\n\n while (indicesSortedByScore.length > 0) {\n const curr = indicesSortedByScore.pop() as number;\n pick.push(curr);\n\n const indices = indicesSortedByScore;\n\n const outputs: number[] = [];\n for (let i = 0; i < indices.length; i++) {\n const idx = indices[i];\n\n const currBox = boxes[curr];\n const idxBox = boxes[idx];\n\n outputs.push(iou(currBox, idxBox, isIOU));\n }\n\n indicesSortedByScore = indicesSortedByScore.filter(\n (_, j) => outputs[j] <= iouThreshold,\n );\n }\n\n return pick;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function normalize(x: tf.Tensor4D, meanRgb: number[]): tf.Tensor4D {\n return tf.tidy(() => {\n const [r, g, b] = meanRgb;\n const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r, 'float32');\n const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g, 'float32');\n const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b, 'float32');\n const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3);\n\n return tf.sub(x, avg_rgb);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\n/**\n * Pads the smaller dimension of an image tensor with zeros, such that width === height.\n *\n * @param imgTensor The image tensor.\n * @param isCenterImage (optional, default: false) If true, add an equal amount of padding on\n * both sides of the minor dimension oof the image.\n * @returns The padded tensor with width === height.\n */\nexport function padToSquare(\n imgTensor: tf.Tensor4D,\n isCenterImage: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const [height, width] = imgTensor.shape.slice(1);\n if (height === width) {\n return imgTensor;\n }\n\n const dimDiff = Math.abs(height - width);\n const paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1));\n const paddingAxis = height > width ? 2 : 1;\n\n const createPaddingTensor = (paddingAmountLocal: number): tf.Tensor => {\n const paddingTensorShape = imgTensor.shape.slice();\n paddingTensorShape[paddingAxis] = paddingAmountLocal;\n return tf.fill(paddingTensorShape, 0, 'float32');\n };\n\n const paddingTensorAppend = createPaddingTensor(paddingAmount);\n const remainingPaddingAmount = dimDiff - (paddingTensorAppend.shape[paddingAxis] as number);\n\n const paddingTensorPrepend = isCenterImage && remainingPaddingAmount\n ? 
createPaddingTensor(remainingPaddingAmount)\n : null;\n\n const tensorsToStack = [\n paddingTensorPrepend,\n imgTensor,\n paddingTensorAppend,\n ]\n .filter((t) => !!t)\n .map((t: tf.Tensor) => tf.cast(t, 'float32')) as tf.Tensor4D[];\n return tf.concat(tensorsToStack, paddingAxis);\n });\n}\n", "export function shuffleArray(inputArray: any[]) {\n const array = inputArray.slice();\n for (let i = array.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n const x = array[i];\n array[i] = array[j];\n array[j] = x;\n }\n return array;\n}\n", "export * from './iou';\nexport * from './minBbox';\nexport * from './nonMaxSuppression';\nexport * from './normalize';\nexport * from './padToSquare';\nexport * from './shuffleArray';\n\nexport function sigmoid(x: number) {\n return 1 / (1 + Math.exp(-x));\n}\n\nexport function inverseSigmoid(x: number) {\n return Math.log(x / (1 - x));\n}\n", "import { Box } from './Box';\n\nexport interface IRect {\n x: number\n y: number\n width: number\n height: number\n}\n\nexport class Rect extends Box implements IRect {\n constructor(x: number, y: number, width: number, height: number, allowNegativeDimensions: boolean = false) {\n super({\n x, y, width, height,\n }, allowNegativeDimensions);\n }\n}\n", "import { minBbox } from '../ops/index';\nimport { getCenterPoint } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { Box } from './Box';\nimport { Dimensions, IDimensions } from './Dimensions';\nimport { FaceDetection } from './FaceDetection';\nimport { Point } from './Point';\nimport { IRect, Rect } from './Rect';\n\n// face alignment constants\nconst relX = 0.5;\nconst relY = 0.43;\nconst relScale = 0.45;\n\nexport interface IFaceLandmarks {\n positions: Point[]\n shift: Point\n}\n\nexport class FaceLandmarks implements IFaceLandmarks {\n protected _shift: Point\n\n protected _positions: Point[]\n\n protected _imgDims: Dimensions\n\n constructor(\n relativeFaceLandmarkPositions: Point[],\n imgDims: IDimensions,\n shift: Point = new Point(0, 0),\n ) {\n const { width, height } = imgDims;\n this._imgDims = new Dimensions(width, height);\n this._shift = shift;\n this._positions = relativeFaceLandmarkPositions.map(\n (pt) => pt.mul(new Point(width, height)).add(shift),\n );\n }\n\n public get shift(): Point { return new Point(this._shift.x, this._shift.y); }\n\n public get imageWidth(): number { return this._imgDims.width; }\n\n public get imageHeight(): number { return this._imgDims.height; }\n\n public get positions(): Point[] { return this._positions; }\n\n public get relativePositions(): Point[] {\n return this._positions.map(\n (pt) => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight)),\n );\n }\n\n public forSize(width: number, height: number): T {\n return new (this.constructor as any)(\n this.relativePositions,\n { width, height },\n );\n }\n\n public shiftBy(x: number, y: number): T {\n return new (this.constructor as any)(\n this.relativePositions,\n this._imgDims,\n new Point(x, y),\n );\n }\n\n public shiftByPoint(pt: Point): T {\n return this.shiftBy(pt.x, pt.y);\n }\n\n /**\n * Aligns the face landmarks after face detection from the relative positions of the faces\n * bounding box, or it's current shift. 
This function should be used to align the face images\n * after face detection has been performed, before they are passed to the face recognition net.\n * This will make the computed face descriptor more accurate.\n *\n * @param detection (optional) The bounding box of the face or the face detection result. If\n * no argument was passed the position of the face landmarks are assumed to be relative to\n * it's current shift.\n * @returns The bounding box of the aligned face.\n */\n public align(\n detection?: FaceDetection | IRect | IBoundingBox | null,\n options: { useDlibAlignment?: boolean, minBoxPadding?: number } = { },\n ): Box {\n if (detection) {\n const box = detection instanceof FaceDetection\n ? detection.box.floor()\n : new Box(detection);\n\n return this.shiftBy(box.x, box.y).align(null, options);\n }\n\n const { useDlibAlignment, minBoxPadding } = { useDlibAlignment: false, minBoxPadding: 0.2, ...options };\n\n if (useDlibAlignment) {\n return this.alignDlib();\n }\n\n return this.alignMinBbox(minBoxPadding);\n }\n\n private alignDlib(): Box {\n const centers = this.getRefPointsForAlignment();\n\n const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers;\n const distToMouth = (pt: Point) => mouthCenter.sub(pt).magnitude();\n const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2;\n\n const size = Math.floor(eyeToMouthDist / relScale);\n\n const refPoint = getCenterPoint(centers);\n // TODO: pad in case rectangle is out of image bounds\n const x = Math.floor(Math.max(0, refPoint.x - (relX * size)));\n const y = Math.floor(Math.max(0, refPoint.y - (relY * size)));\n\n return new Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y));\n }\n\n private alignMinBbox(padding: number): Box {\n const box = minBbox(this.positions);\n return box.pad(box.width * padding, box.height * padding);\n }\n\n protected getRefPointsForAlignment(): Point[] {\n throw new Error('getRefPointsForAlignment not implemented by base class');\n }\n}\n", "import { getCenterPoint } from '../utils/index';\nimport { FaceLandmarks } from './FaceLandmarks';\nimport { Point } from './Point';\n\nexport class FaceLandmarks5 extends FaceLandmarks {\n protected getRefPointsForAlignment(): Point[] {\n const pts = this.positions;\n return [\n pts[0],\n pts[1],\n getCenterPoint([pts[3], pts[4]]),\n ];\n }\n}\n", "import { getCenterPoint } from '../utils/index';\nimport { FaceLandmarks } from './FaceLandmarks';\nimport { Point } from './Point';\n\nexport class FaceLandmarks68 extends FaceLandmarks {\n public getJawOutline(): Point[] {\n return this.positions.slice(0, 17);\n }\n\n public getLeftEyeBrow(): Point[] {\n return this.positions.slice(17, 22);\n }\n\n public getRightEyeBrow(): Point[] {\n return this.positions.slice(22, 27);\n }\n\n public getNose(): Point[] {\n return this.positions.slice(27, 36);\n }\n\n public getLeftEye(): Point[] {\n return this.positions.slice(36, 42);\n }\n\n public getRightEye(): Point[] {\n return this.positions.slice(42, 48);\n }\n\n public getMouth(): Point[] {\n return this.positions.slice(48, 68);\n }\n\n protected getRefPointsForAlignment(): Point[] {\n return [\n this.getLeftEye(),\n this.getRightEye(),\n this.getMouth(),\n ].map(getCenterPoint);\n }\n}\n", "import { round } from '../utils/index';\n\nexport interface IFaceMatch {\n label: string\n distance: number\n}\n\nexport class FaceMatch implements IFaceMatch {\n private _label: string\n\n private _distance: number\n\n constructor(label: string, distance: 
number) {\n this._label = label;\n this._distance = distance;\n }\n\n public get label(): string { return this._label; }\n\n public get distance(): number { return this._distance; }\n\n public toString(withDistance: boolean = true): string {\n return `${this.label}${withDistance ? ` (${round(this.distance)})` : ''}`;\n }\n}\n", "import { isValidNumber } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { Box } from './Box';\nimport { IRect } from './Rect';\n\nexport class LabeledBox extends Box {\n public static assertIsValidLabeledBox(box: any, callee: string) {\n Box.assertIsValidBox(box, callee);\n\n if (!isValidNumber(box.label)) {\n throw new Error(`${callee} - expected property label (${box.label}) to be a number`);\n }\n }\n\n private _label: number\n\n constructor(box: IBoundingBox | IRect | any, label: number) {\n super(box);\n this._label = label;\n }\n\n public get label(): number { return this._label; }\n}\n", "export class LabeledFaceDescriptors {\n private _label: string\n\n private _descriptors: Float32Array[]\n\n constructor(label: string, descriptors: Float32Array[]) {\n if (!(typeof label === 'string')) {\n throw new Error('LabeledFaceDescriptors - constructor expected label to be a string');\n }\n\n if (!Array.isArray(descriptors) || descriptors.some((desc) => !(desc instanceof Float32Array))) {\n throw new Error('LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array');\n }\n\n this._label = label;\n this._descriptors = descriptors;\n }\n\n public get label(): string { return this._label; }\n\n public get descriptors(): Float32Array[] { return this._descriptors; }\n\n public toJSON(): any {\n return {\n label: this.label,\n descriptors: this.descriptors.map((d) => Array.from(d)),\n };\n }\n\n public static fromJSON(json: any): LabeledFaceDescriptors {\n const descriptors = json.descriptors.map((d: any) => new Float32Array(d));\n return new LabeledFaceDescriptors(json.label, descriptors);\n }\n}\n", "import { isValidProbablitiy } from '../utils/index';\nimport { IBoundingBox } from './BoundingBox';\nimport { LabeledBox } from './LabeledBox';\nimport { IRect } from './Rect';\n\nexport class PredictedBox extends LabeledBox {\n public static assertIsValidPredictedBox(box: any, callee: string) {\n LabeledBox.assertIsValidLabeledBox(box, callee);\n\n if (\n !isValidProbablitiy(box.score)\n || !isValidProbablitiy(box.classScore)\n ) {\n throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`);\n }\n }\n\n private _score: number\n\n private _classScore: number\n\n constructor(box: IBoundingBox | IRect | any, label: number, score: number, classScore: number) {\n super(box, label);\n this._score = score;\n this._classScore = classScore;\n }\n\n public get score(): number { return this._score; }\n\n public get classScore(): number { return this._classScore; }\n}\n", "import { FaceDetection } from '../classes/FaceDetection';\n\nexport type WithFaceDetection = TSource & {\n detection: FaceDetection\n}\n\nexport function isWithFaceDetection(obj: any): obj is WithFaceDetection<{}> {\n return obj.detection instanceof FaceDetection;\n}\n\nexport function extendWithFaceDetection(sourceObj: TSource, detection: FaceDetection): WithFaceDetection {\n const extension = { detection };\n return { ...sourceObj, ...extension };\n}\n", "import { Environment } from './types';\n\nexport function createBrowserEnv(): Environment {\n const fetch = window.fetch;\n if 
(!fetch) throw new Error('fetch - missing fetch implementation for browser environment');\n\n const readFile = () => {\n throw new Error('readFile - filesystem not available for browser environment');\n };\n\n return {\n Canvas: HTMLCanvasElement,\n CanvasRenderingContext2D,\n Image: HTMLImageElement,\n ImageData,\n Video: HTMLVideoElement,\n createCanvasElement: () => document.createElement('canvas'),\n createImageElement: () => document.createElement('img'),\n createVideoElement: () => document.createElement('video'),\n fetch,\n readFile,\n };\n}\n", "import { FileSystem } from './types';\n\nexport function createFileSystem(fs?: any): FileSystem {\n let requireFsError = '';\n\n if (!fs) {\n try {\n // eslint-disable-next-line global-require\n fs = require('fs');\n } catch (err) {\n requireFsError = err.toString();\n }\n }\n\n const readFile = fs\n ? (filePath: string) => new Promise((resolve, reject) => {\n fs.readFile(filePath, (err: any, buffer: Buffer) => (err ? reject(err) : resolve(buffer)));\n })\n : () => {\n throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`);\n };\n\n return {\n readFile,\n };\n}\n", "/* eslint-disable max-classes-per-file */\nimport { createFileSystem } from './createFileSystem';\nimport { Environment } from './types';\n\nexport function createNodejsEnv(): Environment {\n // eslint-disable-next-line dot-notation\n const Canvas = global['Canvas'] || global.HTMLCanvasElement;\n const Image = global.Image || global.HTMLImageElement;\n // eslint-disable-next-line dot-notation\n const Video = global['Video'] || global.HTMLVideoElement;\n\n const createCanvasElement = () => {\n if (Canvas) return new Canvas();\n throw new Error('createCanvasElement - missing Canvas implementation for nodejs environment');\n };\n\n const createImageElement = () => {\n if (Image) return new Image();\n throw new Error('createImageElement - missing Image implementation for nodejs environment');\n };\n\n const createVideoElement = () => {\n if (Video) return new Video();\n throw new Error('createVideoElement - missing Video implementation for nodejs environment');\n };\n\n const fetch = global.fetch;\n // if (!fetch) throw new Error('fetch - missing fetch implementation for nodejs environment');\n\n const fileSystem = createFileSystem();\n\n return {\n Canvas: Canvas || class {},\n CanvasRenderingContext2D: global.CanvasRenderingContext2D || class {},\n Image: Image || class {},\n ImageData: global.ImageData || class {},\n Video: global.HTMLVideoElement || class {},\n createCanvasElement,\n createImageElement,\n createVideoElement,\n fetch,\n ...fileSystem,\n };\n}\n", "export function isBrowser(): boolean {\n return typeof window === 'object'\n && typeof document !== 'undefined'\n && typeof HTMLImageElement !== 'undefined'\n && typeof HTMLCanvasElement !== 'undefined'\n && typeof HTMLVideoElement !== 'undefined'\n && typeof ImageData !== 'undefined'\n && typeof CanvasRenderingContext2D !== 'undefined';\n}\n", "export function isNodejs(): boolean {\n return typeof global === 'object'\n && typeof require === 'function'\n && typeof module !== 'undefined'\n && typeof process !== 'undefined' && !!process.version;\n}\n", "import { createBrowserEnv } from './createBrowserEnv';\nimport { createFileSystem } from './createFileSystem';\nimport { createNodejsEnv } from './createNodejsEnv';\nimport { isBrowser } from './isBrowser';\nimport { isNodejs } from './isNodejs';\nimport { Environment } from './types';\n\nlet environment: Environment | 
null;\n\nfunction getEnv(): Environment {\n if (!environment) {\n throw new Error('getEnv - environment is not defined, check isNodejs() and isBrowser()');\n }\n return environment;\n}\n\nfunction setEnv(env: Environment) {\n environment = env;\n}\n\nfunction initialize() {\n // check for isBrowser() first to prevent the electron renderer process\n // from being initialized with the wrong environment due to isNodejs() returning true\n if (isBrowser()) return setEnv(createBrowserEnv());\n if (isNodejs()) return setEnv(createNodejsEnv());\n return null;\n}\n\nfunction monkeyPatch(env: Partial<Environment>) {\n if (!environment) {\n initialize();\n }\n\n if (!environment) {\n throw new Error('monkeyPatch - environment is not defined, check isNodejs() and isBrowser()');\n }\n\n const { Canvas = environment.Canvas, Image = environment.Image } = env;\n environment.Canvas = Canvas;\n environment.Image = Image;\n environment.createCanvasElement = env.createCanvasElement || (() => new Canvas());\n environment.createImageElement = env.createImageElement || (() => new Image());\n\n environment.ImageData = env.ImageData || environment.ImageData;\n environment.Video = env.Video || environment.Video;\n environment.fetch = env.fetch || environment.fetch;\n environment.readFile = env.readFile || environment.readFile;\n}\n\nexport const env = {\n getEnv,\n setEnv,\n initialize,\n createBrowserEnv,\n createFileSystem,\n createNodejsEnv,\n monkeyPatch,\n isBrowser,\n isNodejs,\n};\n\ninitialize();\n\nexport * from './types';\n", "import { env } from '../env/index';\n\nexport function resolveInput(arg: string | any) {\n if (!env.isNodejs() && typeof arg === 'string') {\n return document.getElementById(arg);\n }\n return arg;\n}\n", "import { env } from '../env/index';\nimport { resolveInput } from './resolveInput';\n\nexport function getContext2dOrThrow(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): CanvasRenderingContext2D {\n const { Canvas, CanvasRenderingContext2D } = env.getEnv();\n\n if (canvasArg instanceof CanvasRenderingContext2D) {\n return canvasArg;\n }\n\n const canvas = resolveInput(canvasArg);\n\n if (!(canvas instanceof Canvas)) {\n throw new Error('resolveContext2d - expected canvas to be an instance of Canvas');\n }\n\n const ctx = canvas.getContext('2d');\n if (!ctx) {\n throw new Error('resolveContext2d - canvas 2d context is null');\n }\n\n return ctx;\n}\n", "/* eslint-disable max-classes-per-file */\nimport { IDimensions, IPoint } from '../classes/index';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { resolveInput } from '../dom/resolveInput';\n\n// eslint-disable-next-line no-shadow\nexport enum AnchorPosition {\n // eslint-disable-next-line no-unused-vars\n TOP_LEFT = 'TOP_LEFT',\n // eslint-disable-next-line no-unused-vars\n TOP_RIGHT = 'TOP_RIGHT',\n // eslint-disable-next-line no-unused-vars\n BOTTOM_LEFT = 'BOTTOM_LEFT',\n // eslint-disable-next-line no-unused-vars\n BOTTOM_RIGHT = 'BOTTOM_RIGHT'\n}\n\nexport interface IDrawTextFieldOptions {\n anchorPosition?: AnchorPosition\n backgroundColor?: string\n fontColor?: string\n fontSize?: number\n fontStyle?: string\n padding?: number\n}\n\nexport class DrawTextFieldOptions implements IDrawTextFieldOptions {\n public anchorPosition: AnchorPosition\n\n public backgroundColor: string\n\n public fontColor: string\n\n public fontSize: number\n\n public fontStyle: string\n\n public padding: number\n\n constructor(options: IDrawTextFieldOptions = {}) {\n const {\n anchorPosition, backgroundColor, fontColor, 
fontSize, fontStyle, padding,\n } = options;\n this.anchorPosition = anchorPosition || AnchorPosition.TOP_LEFT;\n this.backgroundColor = backgroundColor || 'rgba(0, 0, 0, 0.5)';\n this.fontColor = fontColor || 'rgba(255, 255, 255, 1)';\n this.fontSize = fontSize || 14;\n this.fontStyle = fontStyle || 'Georgia';\n this.padding = padding || 4;\n }\n}\n\nexport class DrawTextField {\n public text: string[]\n\n public anchor : IPoint\n\n public options: DrawTextFieldOptions\n\n constructor(\n text: string | string[] | DrawTextField,\n anchor: IPoint,\n options: IDrawTextFieldOptions = {},\n ) {\n // eslint-disable-next-line no-nested-ternary\n this.text = typeof text === 'string'\n ? [text]\n : (text instanceof DrawTextField ? text.text : text);\n this.anchor = anchor;\n this.options = new DrawTextFieldOptions(options);\n }\n\n measureWidth(ctx: CanvasRenderingContext2D): number {\n const { padding } = this.options;\n return this.text.map((l) => ctx.measureText(l).width).reduce((w0, w1) => (w0 < w1 ? w1 : w0), 0) + (2 * padding);\n }\n\n measureHeight(): number {\n const { fontSize, padding } = this.options;\n return this.text.length * fontSize + (2 * padding);\n }\n\n getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint {\n const { anchorPosition } = this.options;\n const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT;\n const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT;\n\n const textFieldWidth = this.measureWidth(ctx);\n const textFieldHeight = this.measureHeight();\n const x = (isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x);\n const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y;\n\n // adjust anchor if text box exceeds canvas borders\n if (canvasDims) {\n const { width, height } = canvasDims;\n const newX = Math.max(Math.min(x, width - textFieldWidth), 0);\n const newY = Math.max(Math.min(y, height - textFieldHeight), 0);\n return { x: newX, y: newY };\n }\n return { x, y };\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const canvas = resolveInput(canvasArg);\n const ctx = getContext2dOrThrow(canvas);\n\n const {\n backgroundColor, fontColor, fontSize, fontStyle, padding,\n } = this.options;\n\n ctx.font = `${fontSize}px ${fontStyle}`;\n const maxTextWidth = this.measureWidth(ctx);\n const textHeight = this.measureHeight();\n\n ctx.fillStyle = backgroundColor;\n const upperLeft = this.getUpperLeft(ctx, canvas);\n ctx.fillRect(upperLeft.x, upperLeft.y, maxTextWidth, textHeight);\n\n ctx.fillStyle = fontColor;\n this.text.forEach((textLine, i) => {\n const x = padding + upperLeft.x;\n const y = padding + upperLeft.y + ((i + 1) * fontSize);\n ctx.fillText(textLine, x, y);\n });\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { Box, IBoundingBox, IRect } from '../classes/index';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { AnchorPosition, DrawTextField, DrawTextFieldOptions, IDrawTextFieldOptions } from './DrawTextField';\n\nexport interface IDrawBoxOptions {\n boxColor?: string\n lineWidth?: number\n drawLabelOptions?: IDrawTextFieldOptions\n label?: string\n}\n\nexport class DrawBoxOptions {\n public boxColor: string\n\n public lineWidth: number\n\n public drawLabelOptions: DrawTextFieldOptions\n\n public label?: string\n\n constructor(options: IDrawBoxOptions = {}) {\n const {\n boxColor, lineWidth, label, drawLabelOptions,\n } 
= options;\n this.boxColor = boxColor || 'rgba(0, 0, 255, 1)';\n this.lineWidth = lineWidth || 2;\n this.label = label;\n\n const defaultDrawLabelOptions = {\n anchorPosition: AnchorPosition.BOTTOM_LEFT,\n backgroundColor: this.boxColor,\n };\n this.drawLabelOptions = new DrawTextFieldOptions({ ...defaultDrawLabelOptions, ...drawLabelOptions });\n }\n}\n\nexport class DrawBox {\n public box: Box\n\n public options: DrawBoxOptions\n\n constructor(\n box: IBoundingBox | IRect,\n options: IDrawBoxOptions = {},\n ) {\n this.box = new Box(box);\n this.options = new DrawBoxOptions(options);\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const ctx = getContext2dOrThrow(canvasArg);\n\n const { boxColor, lineWidth } = this.options;\n\n const {\n x, y, width, height,\n } = this.box;\n ctx.strokeStyle = boxColor;\n ctx.lineWidth = lineWidth;\n ctx.strokeRect(x, y, width, height);\n\n const { label } = this.options;\n if (label) {\n new DrawTextField([label], { x: x - (lineWidth / 2), y }, this.options.drawLabelOptions).draw(canvasArg);\n }\n }\n}\n", "import { Box, IBoundingBox, IRect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { isWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';\nimport { round } from '../utils/index';\nimport { DrawBox } from './DrawBox';\n\nexport type TDrawDetectionsInput = IRect | IBoundingBox | FaceDetection | WithFaceDetection<{}>\n\nexport function drawDetections(\n canvasArg: string | HTMLCanvasElement,\n detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>,\n) {\n const detectionsArray = Array.isArray(detections) ? detections : [detections];\n\n detectionsArray.forEach((det) => {\n // eslint-disable-next-line no-nested-ternary\n const score = det instanceof FaceDetection\n ? det.score\n : (isWithFaceDetection(det) ? det.detection.score : undefined);\n\n // eslint-disable-next-line no-nested-ternary\n const box = det instanceof FaceDetection\n ? det.box\n : (isWithFaceDetection(det) ? det.detection.box : new Box(det));\n\n const label = score ? 
`${round(score)}` : undefined;\n new DrawBox(box, { label }).draw(canvasArg);\n });\n}\n", "import { env } from '../env/index';\n\nexport function isMediaLoaded(media: HTMLImageElement | HTMLVideoElement) : boolean {\n const { Image, Video } = env.getEnv();\n\n return (media instanceof Image && media.complete)\n || (media instanceof Video && media.readyState >= 3);\n}\n", "import { env } from '../env/index';\nimport { isMediaLoaded } from './isMediaLoaded';\n\nexport function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement) {\n // eslint-disable-next-line consistent-return\n return new Promise((resolve, reject) => {\n if (media instanceof env.getEnv().Canvas || isMediaLoaded(media)) return resolve(null);\n\n function onError(e: Event) {\n if (!e.currentTarget) return;\n // eslint-disable-next-line no-use-before-define\n e.currentTarget.removeEventListener('load', onLoad);\n e.currentTarget.removeEventListener('error', onError);\n reject(e);\n }\n\n function onLoad(e: Event) {\n if (!e.currentTarget) return;\n e.currentTarget.removeEventListener('load', onLoad);\n e.currentTarget.removeEventListener('error', onError);\n resolve(e);\n }\n\n media.addEventListener('load', onLoad);\n media.addEventListener('error', onError);\n });\n}\n", "import { env } from '../env/index';\n\nexport function bufferToImage(buf: Blob): Promise<HTMLImageElement> {\n return new Promise((resolve, reject) => {\n if (!(buf instanceof Blob)) reject(new Error('bufferToImage - expected buf to be of type: Blob'));\n const reader = new FileReader();\n reader.onload = () => {\n if (typeof reader.result !== 'string') reject(new Error('bufferToImage - expected reader.result to be a string, in onload'));\n const img = env.getEnv().createImageElement();\n img.onload = () => resolve(img);\n img.onerror = reject;\n img.src = reader.result as string;\n };\n reader.onerror = reject;\n reader.readAsDataURL(buf);\n });\n}\n", "import { Dimensions, IDimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\n\nexport function getMediaDimensions(input: HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | IDimensions): Dimensions {\n const { Image, Video } = env.getEnv();\n\n if (input instanceof Image) {\n return new Dimensions(input.naturalWidth, input.naturalHeight);\n }\n if (input instanceof Video) {\n return new Dimensions(input.videoWidth, input.videoHeight);\n }\n return new Dimensions(input.width, input.height);\n}\n", "import { IDimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { getMediaDimensions } from './getMediaDimensions';\nimport { isMediaLoaded } from './isMediaLoaded';\n\nexport function createCanvas({ width, height }: IDimensions): HTMLCanvasElement {\n const { createCanvasElement } = env.getEnv();\n const canvas = createCanvasElement();\n canvas.width = width;\n canvas.height = height;\n return canvas;\n}\n\nexport function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement {\n const { ImageData } = env.getEnv();\n\n if (!(media instanceof ImageData) && !isMediaLoaded(media)) {\n throw new Error('createCanvasFromMedia - media has not finished loading yet');\n }\n\n const { width, height } = dims || getMediaDimensions(media);\n const canvas = createCanvas({ width, height });\n\n if (media instanceof ImageData) {\n getContext2dOrThrow(canvas).putImageData(media, 0, 0);\n } else {\n 
getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height);\n }\n return canvas;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { env } from '../env/index';\nimport { isTensor4D } from '../utils/index';\n\nexport async function imageTensorToCanvas(\n imgTensor: tf.Tensor,\n canvas?: HTMLCanvasElement,\n): Promise<HTMLCanvasElement> {\n const targetCanvas = canvas || env.getEnv().createCanvasElement();\n\n const [height, width, numChannels] = imgTensor.shape.slice(isTensor4D(imgTensor) ? 1 : 0);\n const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt());\n await tf.browser.toPixels(imgTensor3D, targetCanvas);\n\n imgTensor3D.dispose();\n\n return targetCanvas;\n}\n", "import { env } from '../env/index';\n\nexport function isMediaElement(input: any) {\n const { Image, Canvas, Video } = env.getEnv();\n\n return input instanceof Image\n || input instanceof Canvas\n || input instanceof Video;\n}\n", "import { env } from '../env/index';\nimport { createCanvas, createCanvasFromMedia } from './createCanvas';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { getMediaDimensions } from './getMediaDimensions';\n\nexport function imageToSquare(input: HTMLImageElement | HTMLCanvasElement, inputSize: number, centerImage: boolean = false) {\n const { Image, Canvas } = env.getEnv();\n\n if (!(input instanceof Image || input instanceof Canvas)) {\n throw new Error('imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement');\n }\n\n if (inputSize <= 0) return createCanvas({ width: 1, height: 1 });\n const dims = getMediaDimensions(input);\n const scale = inputSize / Math.max(dims.height, dims.width);\n const width = scale * dims.width;\n const height = scale * dims.height;\n\n const targetCanvas = createCanvas({ width: inputSize, height: inputSize });\n const inputCanvas = input instanceof Canvas ? input : createCanvasFromMedia(input);\n\n const offset = Math.abs(width - height) / 2;\n const dx = centerImage && width < height ? offset : 0;\n const dy = centerImage && height < width ? 
offset : 0;\n if (inputCanvas.width > 0 && inputCanvas.height > 0) getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height);\n\n return targetCanvas;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Dimensions } from '../classes/Dimensions';\nimport { env } from '../env/index';\nimport { padToSquare } from '../ops/padToSquare';\nimport { computeReshapedDimensions, isTensor3D, isTensor4D, range } from '../utils/index';\nimport { createCanvasFromMedia } from './createCanvas';\nimport { imageToSquare } from './imageToSquare';\nimport { TResolvedNetInput } from './types';\n\nexport class NetInput {\n private _imageTensors: Array<tf.Tensor3D | tf.Tensor4D> = []\n\n private _canvases: HTMLCanvasElement[] = []\n\n private _batchSize: number\n\n private _treatAsBatchInput: boolean = false\n\n private _inputDimensions: number[][] = []\n\n private _inputSize: number\n\n constructor(inputs: Array<TResolvedNetInput>, treatAsBatchInput: boolean = false) {\n if (!Array.isArray(inputs)) {\n throw new Error(`NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have ${inputs}`);\n }\n\n this._treatAsBatchInput = treatAsBatchInput;\n this._batchSize = inputs.length;\n\n inputs.forEach((input, idx) => {\n if (isTensor3D(input)) {\n this._imageTensors[idx] = input;\n this._inputDimensions[idx] = input.shape;\n return;\n }\n\n if (isTensor4D(input)) {\n const batchSize = (input as any).shape[0];\n if (batchSize !== 1) {\n throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);\n }\n\n this._imageTensors[idx] = input;\n this._inputDimensions[idx] = (input as any).shape.slice(1);\n return;\n }\n\n const canvas = (input as any) instanceof env.getEnv().Canvas ? input : createCanvasFromMedia(input);\n this._canvases[idx] = canvas;\n this._inputDimensions[idx] = [canvas.height, canvas.width, 3];\n });\n }\n\n public get imageTensors(): Array<tf.Tensor3D | tf.Tensor4D> {\n return this._imageTensors;\n }\n\n public get canvases(): HTMLCanvasElement[] {\n return this._canvases;\n }\n\n public get isBatchInput(): boolean {\n return this.batchSize > 1 || this._treatAsBatchInput;\n }\n\n public get batchSize(): number {\n return this._batchSize;\n }\n\n public get inputDimensions(): number[][] {\n return this._inputDimensions;\n }\n\n public get inputSize(): number | undefined {\n return this._inputSize;\n }\n\n public get reshapedInputDimensions(): Dimensions[] {\n return range(this.batchSize, 0, 1).map(\n (_, batchIdx) => this.getReshapedInputDimensions(batchIdx),\n );\n }\n\n public getInput(batchIdx: number): tf.Tensor3D | tf.Tensor4D | HTMLCanvasElement {\n return this.canvases[batchIdx] || this.imageTensors[batchIdx];\n }\n\n public getInputDimensions(batchIdx: number): number[] {\n return this._inputDimensions[batchIdx];\n }\n\n public getInputHeight(batchIdx: number): number {\n return this._inputDimensions[batchIdx][0];\n }\n\n public getInputWidth(batchIdx: number): number {\n return this._inputDimensions[batchIdx][1];\n }\n\n public getReshapedInputDimensions(batchIdx: number): Dimensions {\n if (typeof this.inputSize !== 'number') {\n throw new Error('getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet');\n }\n\n const width = this.getInputWidth(batchIdx);\n const height = this.getInputHeight(batchIdx);\n return computeReshapedDimensions({ width, height }, this.inputSize);\n }\n\n /**\n * Create a batch tensor from all input canvases and tensors\n * with size [batchSize, inputSize, inputSize, 
3].\n *\n * @param inputSize Height and width of the tensor.\n * @param isCenterInputs (optional, default: true) If true, add an equal amount of padding on\n * both sides of the minor dimension of the image.\n * @returns The batch tensor.\n */\n public toBatchTensor(inputSize: number, isCenterInputs: boolean = true): tf.Tensor4D {\n this._inputSize = inputSize;\n\n return tf.tidy(() => {\n const inputTensors = range(this.batchSize, 0, 1).map((batchIdx) => {\n const input = this.getInput(batchIdx);\n\n if (input instanceof tf.Tensor) {\n let imgTensor = isTensor4D(input) ? input : tf.expandDims(input);\n imgTensor = padToSquare(imgTensor, isCenterInputs);\n\n if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) {\n imgTensor = tf.image.resizeBilinear(imgTensor, [inputSize, inputSize], false, false);\n }\n\n return imgTensor.as3D(inputSize, inputSize, 3);\n }\n\n if (input instanceof env.getEnv().Canvas) {\n return tf.browser.fromPixels(imageToSquare(input, inputSize, isCenterInputs));\n }\n\n throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`);\n });\n\n const batchTensor = tf.stack(inputTensors.map((t) => tf.cast(t, 'float32'))).as4D(this.batchSize, inputSize, inputSize, 3);\n\n return batchTensor;\n });\n }\n}\n", "import { isTensor3D, isTensor4D } from '../utils/index';\nimport { awaitMediaLoaded } from './awaitMediaLoaded';\nimport { isMediaElement } from './isMediaElement';\nimport { NetInput } from './NetInput';\nimport { resolveInput } from './resolveInput';\nimport { TNetInput } from './types';\n\n/**\n * Validates the inputs to make sure they are valid net inputs and awaits all media elements\n * to finish loading.\n *\n * @param input The input, which can be a media element or an array of different media elements.\n * @returns A NetInput instance, which can be passed into one of the neural networks.\n */\nexport async function toNetInput(inputs: TNetInput): Promise<NetInput> {\n if (inputs instanceof NetInput) return inputs;\n const inputArgArray = Array.isArray(inputs) ? inputs : [inputs];\n if (!inputArgArray.length) throw new Error('toNetInput - empty array passed as input');\n const getIdxHint = (idx: number) => (Array.isArray(inputs) ? ` at input index ${idx}:` : '');\n const inputArray = inputArgArray.map(resolveInput);\n inputArray.forEach((input, i) => {\n if (!isMediaElement(input) && !isTensor3D(input) && !isTensor4D(input)) {\n if (typeof inputArgArray[i] === 'string') throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`);\n throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);\n }\n if (isTensor4D(input)) {\n // if tf.Tensor4D is passed in the input array, the batch size has to be 1\n const batchSize = input.shape[0];\n if (batchSize !== 1) throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`);\n }\n });\n // wait for all media elements to finish loading\n await Promise.all(inputArray.map((input) => isMediaElement(input) && awaitMediaLoaded(input)));\n return new NetInput(inputArray, Array.isArray(inputs));\n}\n
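// usage sketch (illustrative, not part of the original sources): resolve an element id or media element into a NetInput before inference,\n//   const netInput = await toNetInput('inputImg'); // assumes an img element with id 'inputImg' that has finished loading\n//   const batchTensor = netInput.toBatchTensor(112, true); // [1, 112, 112, 3] for a single input\n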
", "import { FaceDetection } from '../classes/FaceDetection';\nimport { Rect } from '../classes/Rect';\nimport { env } from '../env/index';\nimport { createCanvas } from './createCanvas';\nimport { getContext2dOrThrow } from './getContext2dOrThrow';\nimport { imageTensorToCanvas } from './imageTensorToCanvas';\nimport { toNetInput } from './toNetInput';\nimport { TNetInput } from './types';\n\n/**\n * Extracts the image regions containing the detected faces.\n *\n * @param input The image that face detection has been performed on.\n * @param detections The face detection results or face bounding boxes for that image.\n * @returns The Canvases of the corresponding image region for each detected face.\n */\nexport async function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]> {\n const { Canvas } = env.getEnv();\n let canvas = input as HTMLCanvasElement;\n if (!(input instanceof Canvas)) {\n const netInput = await toNetInput(input);\n if (netInput.batchSize > 1) throw new Error('extractFaces - batchSize > 1 not supported');\n const tensorOrCanvas = netInput.getInput(0);\n canvas = tensorOrCanvas instanceof Canvas ? tensorOrCanvas : await imageTensorToCanvas(tensorOrCanvas);\n }\n const ctx = getContext2dOrThrow(canvas);\n const boxes = detections\n .map((det) => (det instanceof FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det))\n .map((box) => box.clipAtImageBorders(canvas.width, canvas.height));\n return boxes.map(({ x, y, width, height }) => {\n const faceImg = createCanvas({ width, height });\n if (width > 0 && height > 0) getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0);\n return faceImg;\n });\n}\n
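// usage sketch (illustrative; inputImage and detections are assumed to come from a prior detection pass):\n//   const faceCanvases = await extractFaces(inputImage, detections);\n//   faceCanvases.forEach((faceCanvas) => document.body.appendChild(faceCanvas));\n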
", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Rect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { isTensor3D, isTensor4D } from '../utils/index';\n\n/**\n * Extracts the tensors of the image regions containing the detected faces.\n * Useful if you want to compute the face descriptors for the face images.\n * Using this method is faster than extracting a canvas for each face and\n * converting them to tensors individually.\n *\n * @param imageTensor The image tensor that face detection has been performed on.\n * @param detections The face detection results or face bounding boxes for that image.\n * @returns Tensors of the corresponding image region for each detected face.\n */\nexport async function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]> {\n if (!isTensor3D(imageTensor) && !isTensor4D(imageTensor)) {\n throw new Error('extractFaceTensors - expected image tensor to be 3D or 4D');\n }\n\n if (isTensor4D(imageTensor) && imageTensor.shape[0] > 1) {\n throw new Error('extractFaceTensors - batchSize > 1 not supported');\n }\n\n return tf.tidy(() => {\n const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(isTensor4D(imageTensor) ? 1 : 0);\n\n const boxes = detections\n .map((det) => (det instanceof FaceDetection\n ? det.forSize(imgWidth, imgHeight).box\n : det))\n .map((box) => box.clipAtImageBorders(imgWidth, imgHeight));\n\n const faceTensors = boxes.map(({\n x, y, width, height,\n }) => tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels]));\n\n return faceTensors;\n });\n}\n", "import { env } from '../env/index';\n\nexport async function fetchOrThrow(\n url: string,\n // eslint-disable-next-line no-undef\n init?: RequestInit,\n): Promise<Response> {\n const { fetch } = env.getEnv();\n const res = await fetch(url, init);\n if (!(res.status < 400)) {\n throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`);\n }\n return res;\n}\n", "import { bufferToImage } from './bufferToImage';\nimport { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchImage(uri: string): Promise<HTMLImageElement> {\n const res = await fetchOrThrow(uri);\n const blob = await (res).blob();\n\n if (!blob.type.startsWith('image/')) {\n throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`);\n }\n return bufferToImage(blob);\n}\n", "import { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchJson<T>(uri: string): Promise<T> {\n return (await fetchOrThrow(uri)).json();\n}\n", "import { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchNetWeights(uri: string): Promise<Float32Array> {\n return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer());\n}\n", "import { env } from '../env/index';\n\nexport function bufferToVideo(buf: Blob): Promise<HTMLVideoElement> {\n return new Promise((resolve, reject) => {\n if (!(buf instanceof Blob)) reject(new Error('bufferToVideo - expected buf to be of type: Blob'));\n\n const video = env.getEnv().createVideoElement();\n video.oncanplay = () => 
resolve(video);\n video.onerror = reject;\n // video.type = buf.type;\n video.playsInline = true;\n video.autoplay = true;\n video.muted = true;\n video.src = URL.createObjectURL(buf);\n });\n}\n", "import { bufferToVideo } from './bufferToVideo';\nimport { fetchOrThrow } from './fetchOrThrow';\n\nexport async function fetchVideo(uri: string): Promise<HTMLVideoElement> {\n const res = await fetchOrThrow(uri);\n const blob = await (res).blob();\n\n if (!blob.type.startsWith('video/')) {\n throw new Error(`fetchVideo - expected blob type to be of type video/*, instead have: ${blob.type}, for url: ${res.url}`);\n }\n return bufferToVideo(blob);\n}\n", "export function getModelUris(uri: string | undefined, defaultModelName: string) {\n const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`;\n\n if (!uri) {\n return {\n modelBaseUri: '',\n manifestUri: defaultManifestFilename,\n };\n }\n\n if (uri === '/') {\n return {\n modelBaseUri: '/',\n manifestUri: `/${defaultManifestFilename}`,\n };\n }\n // eslint-disable-next-line no-nested-ternary\n const protocol = uri.startsWith('http://') ? 'http://' : uri.startsWith('https://') ? 'https://' : '';\n uri = uri.replace(protocol, '');\n\n const parts = uri.split('/').filter((s) => s);\n\n const manifestFile = uri.endsWith('.json')\n ? parts[parts.length - 1]\n : defaultManifestFilename;\n\n let modelBaseUri = protocol + (uri.endsWith('.json') ? parts.slice(0, parts.length - 1) : parts).join('/');\n modelBaseUri = uri.startsWith('/') ? `/${modelBaseUri}` : modelBaseUri;\n\n return {\n modelBaseUri,\n manifestUri: modelBaseUri === '/' ? `/${manifestFile}` : `${modelBaseUri}/${manifestFile}`,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { getModelUris } from '../common/getModelUris';\nimport { fetchJson } from './fetchJson';\n\nexport async function loadWeightMap(\n uri: string | undefined,\n defaultModelName: string,\n): Promise<tf.NamedTensorMap> {\n const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName);\n const manifest = await fetchJson<tf.io.WeightsManifestConfig>(manifestUri);\n // if (manifest['weightsManifest']) manifest = manifest['weightsManifest'];\n return tf.io.loadWeights(manifest, modelBaseUri);\n}\n", "import { IDimensions } from '../classes/index';\nimport { getMediaDimensions } from './getMediaDimensions';\n\nexport function matchDimensions(input: IDimensions, reference: IDimensions, useMediaDimensions: boolean = false) {\n const { width, height } = useMediaDimensions\n ? 
getMediaDimensions(reference)\n : reference;\n input.width = width;\n input.height = height;\n return { width, height };\n}\n", "import * as tf from '../dist/tfjs.esm';\n\nimport { ParamMapping } from './common/index';\nimport { getModelUris } from './common/getModelUris';\nimport { loadWeightMap } from './dom/index';\nimport { env } from './env/index';\n\nexport abstract class NeuralNetwork<TNetParams> {\n constructor(name: string) {\n this._name = name;\n }\n\n protected _params: TNetParams | undefined = undefined\n\n protected _paramMappings: ParamMapping[] = []\n\n public _name: any;\n\n public get params(): TNetParams | undefined { return this._params; }\n\n public get paramMappings(): ParamMapping[] { return this._paramMappings; }\n\n public get isLoaded(): boolean { return !!this.params; }\n\n public getParamFromPath(paramPath: string): tf.Tensor {\n const { obj, objProp } = this.traversePropertyPath(paramPath);\n return obj[objProp];\n }\n\n public reassignParamFromPath(paramPath: string, tensor: tf.Tensor) {\n const { obj, objProp } = this.traversePropertyPath(paramPath);\n obj[objProp].dispose();\n obj[objProp] = tensor;\n }\n\n public getParamList() {\n return this._paramMappings.map(({ paramPath }) => ({\n path: paramPath,\n tensor: this.getParamFromPath(paramPath),\n }));\n }\n\n public getTrainableParams() {\n return this.getParamList().filter((param) => param.tensor instanceof tf.Variable);\n }\n\n public getFrozenParams() {\n return this.getParamList().filter((param) => !(param.tensor instanceof tf.Variable));\n }\n\n public variable() {\n this.getFrozenParams().forEach(({ path, tensor }) => {\n this.reassignParamFromPath(path, tensor.variable());\n });\n }\n\n public freeze() {\n this.getTrainableParams().forEach(({ path, tensor: variable }) => {\n const tensor = tf.tensor(variable.dataSync());\n variable.dispose();\n this.reassignParamFromPath(path, tensor);\n });\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.getParamList().forEach((param) => {\n if (throwOnRedispose && param.tensor.isDisposed) {\n throw new Error(`param tensor has already been disposed for path ${param.path}`);\n }\n param.tensor.dispose();\n });\n this._params = undefined;\n }\n\n public serializeParams(): Float32Array {\n return new Float32Array(\n this.getParamList()\n .map(({ tensor }) => Array.from(tensor.dataSync()) as number[])\n .reduce((flat, arr) => flat.concat(arr)),\n );\n }\n\n public async load(weightsOrUrl: Float32Array | string | undefined): Promise<void> {\n if (weightsOrUrl instanceof Float32Array) {\n this.extractWeights(weightsOrUrl);\n return;\n }\n await this.loadFromUri(weightsOrUrl);\n }\n\n public async loadFromUri(uri: string | undefined) {\n if (uri && typeof uri !== 'string') {\n throw new Error(`${this._name}.loadFromUri - expected model uri`);\n }\n const weightMap = await loadWeightMap(uri, this.getDefaultModelName());\n this.loadFromWeightMap(weightMap);\n }\n\n public async loadFromDisk(filePath: string | undefined) {\n if (filePath && typeof filePath !== 'string') {\n throw new Error(`${this._name}.loadFromDisk - expected model file path`);\n }\n const { readFile } = env.getEnv();\n const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName());\n const fetchWeightsFromDisk = (filePaths: string[]) => Promise.all(filePaths.map((fp) => readFile(fp).then((buf) => buf.buffer)));\n const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk);\n const manifest = JSON.parse((await readFile(manifestUri)).toString());\n const weightMap = await 
loadWeights(manifest, modelBaseUri);\n this.loadFromWeightMap(weightMap);\n }\n\n public loadFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { paramMappings, params } = this.extractParamsFromWeightMap(weightMap);\n this._paramMappings = paramMappings;\n this._params = params;\n }\n\n public extractWeights(weights: Float32Array) {\n const { paramMappings, params } = this.extractParams(weights);\n this._paramMappings = paramMappings;\n this._params = params;\n }\n\n private traversePropertyPath(paramPath: string) {\n if (!this.params) {\n throw new Error('traversePropertyPath - model has no loaded params');\n }\n\n const result = paramPath.split('/').reduce((res: { nextObj: any, obj?: any, objProp?: string }, objProp) => {\n // eslint-disable-next-line no-prototype-builtins\n if (!res.nextObj.hasOwnProperty(objProp)) {\n throw new Error(`traversePropertyPath - object does not have property ${objProp}, for path ${paramPath}`);\n }\n return { obj: res.nextObj, objProp, nextObj: res.nextObj[objProp] };\n }, { nextObj: this.params });\n\n const { obj, objProp } = result;\n if (!obj || !objProp || !(obj[objProp] instanceof tf.Tensor)) {\n throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`);\n }\n\n return { obj, objProp };\n }\n\n protected abstract getDefaultModelName(): string\n\n // eslint-disable-next-line no-unused-vars\n protected abstract extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TNetParams, paramMappings: ParamMapping[] }\n\n // eslint-disable-next-line no-unused-vars\n protected abstract extractParams(weights: Float32Array): { params: TNetParams, paramMappings: ParamMapping[] }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { SeparableConvParams } from './types';\n\nexport function depthwiseSeparableConv(\n x: tf.Tensor4D,\n params: SeparableConvParams,\n stride: [number, number],\n): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, 'same');\n out = tf.add(out, params.bias);\n return out;\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, SeparableConvParams } from '../common/index';\nimport { depthwiseSeparableConv } from '../common/depthwiseSeparableConv';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function denseBlock3(\n x: tf.Tensor4D,\n denseBlockParams: DenseBlock3Params,\n isFirstLayer: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out1 = tf.relu(\n isFirstLayer\n ? tf.add(\n tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, [2, 2], 'same'),\n denseBlockParams.conv0.bias,\n )\n : depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, [2, 2]),\n ) as tf.Tensor4D;\n const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);\n\n const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D;\n const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]);\n\n return tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D;\n });\n}\n\nexport function denseBlock4(\n x: tf.Tensor4D,\n denseBlockParams: DenseBlock4Params,\n isFirstLayer: boolean = false,\n isScaleDown: boolean = true,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out1 = tf.relu(\n isFirstLayer\n ? tf.add(\n tf.conv2d(x, (denseBlockParams.conv0 as ConvParams).filters, isScaleDown ? 
[2, 2] : [1, 1], 'same'),\n denseBlockParams.conv0.bias,\n )\n : depthwiseSeparableConv(x, denseBlockParams.conv0 as SeparableConvParams, isScaleDown ? [2, 2] : [1, 1]),\n ) as tf.Tensor4D;\n const out2 = depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]);\n\n const in3 = tf.relu(tf.add(out1, out2)) as tf.Tensor4D;\n const out3 = depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]);\n\n const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))) as tf.Tensor4D;\n const out4 = depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1]);\n\n return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))) as tf.Tensor4D;\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from './types';\n\nexport function convLayer(\n x: tf.Tensor4D,\n params: ConvParams,\n padding: 'valid' | 'same' = 'same',\n withRelu: boolean = false,\n): tf.Tensor4D {\n return tf.tidy(() => {\n const out = tf.add(\n tf.conv2d(x, params.filters, [1, 1], padding),\n params.bias,\n ) as tf.Tensor4D;\n\n return withRelu ? tf.relu(out) : out;\n });\n}\n", "import { ParamMapping } from './types';\n\nexport function disposeUnusedWeightTensors(weightMap: any, paramMappings: ParamMapping[]) {\n Object.keys(weightMap).forEach((path) => {\n if (!paramMappings.some((pm) => pm.originalPath === path)) {\n weightMap[path].dispose();\n }\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, ExtractWeightsFunction, ParamMapping } from './types';\n\nexport function extractConvParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvParams => {\n const filters = tf.tensor4d(\n extractWeights(channelsIn * channelsOut * filterSize * filterSize),\n [filterSize, filterSize, channelsIn, channelsOut],\n );\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return { filters, bias };\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, FCParams, ParamMapping } from './types';\n\nexport function extractFCParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (\n channelsIn: number,\n channelsOut: number,\n mappedPrefix: string,\n ): FCParams => {\n const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]);\n const fc_bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/weights` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return {\n weights: fc_weights,\n bias: fc_bias,\n };\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\n// eslint-disable-next-line no-unused-vars\nexport type ExtractWeightsFunction = (numWeights: number) => Float32Array\n\nexport type ParamMapping = {\n originalPath?: string\n paramPath: string\n}\n\nexport type ConvParams = {\n filters: tf.Tensor4D\n bias: tf.Tensor1D\n}\n\nexport type FCParams = {\n weights: tf.Tensor2D\n bias: tf.Tensor1D\n}\n\nexport class SeparableConvParams {\n // eslint-disable-next-line no-useless-constructor\n constructor(\n // eslint-disable-next-line no-unused-vars\n public depthwise_filter: tf.Tensor4D,\n // eslint-disable-next-line no-unused-vars\n public pointwise_filter: tf.Tensor4D,\n // eslint-disable-next-line no-unused-vars\n public bias: tf.Tensor1D,\n // 
eslint-disable-next-line no-empty-function\n ) {}\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, ParamMapping, SeparableConvParams } from './types';\n\nexport function extractSeparableConvParamsFactory(\n extractWeights: ExtractWeightsFunction,\n paramMappings: ParamMapping[],\n) {\n return (channelsIn: number, channelsOut: number, mappedPrefix: string): SeparableConvParams => {\n const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]);\n const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]);\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/depthwise_filter` },\n { paramPath: `${mappedPrefix}/pointwise_filter` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return new SeparableConvParams(\n depthwise_filter,\n pointwise_filter,\n bias,\n );\n };\n}\n\nexport function loadSeparableConvParamsFactory(\n // eslint-disable-next-line no-unused-vars\n extractWeightEntry: <T>(originalPath: string, paramRank: number) => T,\n) {\n return (prefix: string): SeparableConvParams => {\n const depthwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/depthwise_filter`, 4);\n const pointwise_filter = extractWeightEntry<tf.Tensor4D>(`${prefix}/pointwise_filter`, 4);\n const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1);\n\n return new SeparableConvParams(\n depthwise_filter,\n pointwise_filter,\n bias,\n );\n };\n}\n", "import { isTensor } from '../utils/index';\nimport { ParamMapping } from './types';\n\nexport function extractWeightEntryFactory(weightMap: any, paramMappings: ParamMapping[]) {\n return (originalPath: string, paramRank: number, mappedPath?: string) => {\n const tensor = weightMap[originalPath];\n\n if (!isTensor(tensor, paramRank)) {\n throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor}`);\n }\n\n paramMappings.push(\n { originalPath, paramPath: mappedPath || originalPath },\n );\n\n return tensor;\n };\n}\n", "export function extractWeightsFactory(weights: Float32Array) {\n let remainingWeights = weights;\n\n function extractWeights(numWeights: number): Float32Array {\n const ret = remainingWeights.slice(0, numWeights);\n remainingWeights = remainingWeights.slice(numWeights);\n return ret;\n }\n\n function getRemainingWeights(): Float32Array {\n return remainingWeights;\n }\n\n return {\n extractWeights,\n getRemainingWeights,\n };\n}\n", "import { extractConvParamsFactory, extractSeparableConvParamsFactory, ExtractWeightsFunction, ParamMapping } from '../common/index';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n function extractDenseBlock3Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock3Params {\n const conv0 = isFirstLayer\n ? 
extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`)\n : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`);\n const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`);\n const conv2 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`);\n\n return { conv0, conv1, conv2 };\n }\n\n function extractDenseBlock4Params(channelsIn: number, channelsOut: number, mappedPrefix: string, isFirstLayer: boolean = false): DenseBlock4Params {\n const { conv0, conv1, conv2 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer);\n const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`);\n\n return {\n conv0, conv1, conv2, conv3,\n };\n }\n\n return {\n extractDenseBlock3Params,\n extractDenseBlock4Params,\n };\n}\n", "import { extractWeightsFactory, ParamMapping } from '../common/index';\nimport { extractorsFactory } from './extractorsFactory';\nimport { FaceFeatureExtractorParams } from './types';\n\nexport function extractParams(weights: Float32Array): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractDenseBlock4Params,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const dense0 = extractDenseBlock4Params(3, 32, 'dense0', true);\n const dense1 = extractDenseBlock4Params(32, 64, 'dense1');\n const dense2 = extractDenseBlock4Params(64, 128, 'dense2');\n const dense3 = extractDenseBlock4Params(128, 256, 'dense3');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: {\n dense0, dense1, dense2, dense3,\n },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from './types';\n\n// eslint-disable-next-line no-unused-vars\nexport function loadConvParamsFactory(extractWeightEntry: <T>(originalPath: string, paramRank: number) => T) {\n return (prefix: string): ConvParams => {\n const filters = extractWeightEntry<tf.Tensor4D>(`${prefix}/filters`, 4);\n const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1);\n\n return { filters, bias };\n };\n}\n", "import { extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common/index';\nimport { loadConvParamsFactory } from '../common/loadConvParamsFactory';\nimport { DenseBlock3Params, DenseBlock4Params } from './types';\n\nexport function loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n const extractConvParams = loadConvParamsFactory(extractWeightEntry);\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n\n function extractDenseBlock3Params(prefix: string, isFirstLayer: boolean = false): DenseBlock3Params {\n const conv0 = isFirstLayer\n ? extractConvParams(`${prefix}/conv0`)\n : extractSeparableConvParams(`${prefix}/conv0`);\n const conv1 = extractSeparableConvParams(`${prefix}/conv1`);\n const conv2 = extractSeparableConvParams(`${prefix}/conv2`);\n\n return { conv0, conv1, conv2 };\n }\n\n function extractDenseBlock4Params(prefix: string, isFirstLayer: boolean = false): DenseBlock4Params {\n const conv0 = isFirstLayer\n ? 
extractConvParams(`${prefix}/conv0`)\n : extractSeparableConvParams(`${prefix}/conv0`);\n const conv1 = extractSeparableConvParams(`${prefix}/conv1`);\n const conv2 = extractSeparableConvParams(`${prefix}/conv2`);\n const conv3 = extractSeparableConvParams(`${prefix}/conv3`);\n\n return {\n conv0, conv1, conv2, conv3,\n };\n }\n\n return {\n extractDenseBlock3Params,\n extractDenseBlock4Params,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, ParamMapping } from '../common/index';\nimport { loadParamsFactory } from './loadParamsFactory';\nimport { FaceFeatureExtractorParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: FaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractDenseBlock4Params,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const params = {\n dense0: extractDenseBlock4Params('dense0', true),\n dense1: extractDenseBlock4Params('dense1'),\n dense2: extractDenseBlock4Params('dense2'),\n dense3: extractDenseBlock4Params('dense3'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { denseBlock4 } from './denseBlock';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { FaceFeatureExtractorParams, IFaceFeatureExtractor } from './types';\n\nexport class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {\n constructor() {\n super('FaceFeatureExtractor');\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('FaceFeatureExtractor - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n\n let out = denseBlock4(normalized, params.dense0, true);\n out = denseBlock4(out, params.dense1);\n out = denseBlock4(out, params.dense2);\n out = denseBlock4(out, params.dense3);\n out = tf.avgPool(out, [7, 7], [2, 2], 'valid');\n\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise<tf.Tensor4D> {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'face_feature_extractor_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FCParams } from './types';\n\nexport function fullyConnectedLayer(\n x: tf.Tensor2D,\n params: FCParams,\n): tf.Tensor2D {\n return tf.tidy(() => tf.add(\n tf.matMul(x, params.weights),\n params.bias,\n ));\n}\n
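// note (added for clarity): fullyConnectedLayer computes y = x * W + b,\n// where x is [batchSize, channelsIn], params.weights is [channelsIn, channelsOut]\n// and params.bias is [channelsOut], so the result has shape [batchSize, channelsOut]\n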
", "import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParams(weights: Float32Array, channelsIn: number, channelsOut: number): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const extractFCParams = extractFCParamsFactory(extractWeights, paramMappings);\n\n const fc = extractFCParams(channelsIn, channelsOut, 'fc');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { fc },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractFcParams(prefix: string): FCParams {\n const weights = extractWeightEntry<tf.Tensor2D>(`${prefix}/weights`, 2);\n const bias = extractWeightEntry<tf.Tensor1D>(`${prefix}/bias`, 1);\n return { weights, bias };\n }\n\n const params = {\n fc: extractFcParams('fc'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function seperateWeightMaps(weightMap: tf.NamedTensorMap) {\n const featureExtractorMap: tf.NamedTensorMap = {};\n const classifierMap: tf.NamedTensorMap = {};\n\n Object.keys(weightMap).forEach((key) => {\n const map = key.startsWith('fc') ? classifierMap : featureExtractorMap;\n map[key] = weightMap[key];\n });\n\n return { featureExtractorMap, classifierMap };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { fullyConnectedLayer } from '../common/fullyConnectedLayer';\nimport { NetInput } from '../dom/index';\nimport { FaceFeatureExtractorParams, IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { NetParams } from './types';\nimport { seperateWeightMaps } from './util';\n\nexport abstract class FaceProcessor<\n TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams\n>\n extends NeuralNetwork<NetParams> {\n protected _faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>\n\n constructor(_name: string, faceFeatureExtractor: IFaceFeatureExtractor<TExtractorParams>) {\n super(_name);\n this._faceFeatureExtractor = faceFeatureExtractor;\n }\n\n public get faceFeatureExtractor(): IFaceFeatureExtractor<TExtractorParams> {\n return this._faceFeatureExtractor;\n }\n\n protected abstract getDefaultModelName(): string\n\n protected abstract getClassifierChannelsIn(): number\n\n protected abstract getClassifierChannelsOut(): number\n\n public runNet(input: NetInput | tf.Tensor4D): tf.Tensor2D {\n const { params } = this;\n\n if (!params) {\n throw new Error(`${this._name} - load model before inference`);\n }\n\n return tf.tidy(() => {\n const bottleneckFeatures = input instanceof NetInput\n ? 
this.faceFeatureExtractor.forwardInput(input)\n : input;\n return fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0], -1), params.fc);\n });\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.faceFeatureExtractor.dispose(throwOnRedispose);\n super.dispose(throwOnRedispose);\n }\n\n public loadClassifierParams(weights: Float32Array) {\n const { params, paramMappings } = this.extractClassifierParams(weights);\n this._params = params;\n this._paramMappings = paramMappings;\n }\n\n public extractClassifierParams(weights: Float32Array) {\n return extractParams(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut());\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap);\n\n this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap);\n\n return extractParamsFromWeightMap(classifierMap);\n }\n\n protected extractParams(weights: Float32Array) {\n const cIn = this.getClassifierChannelsIn();\n const cOut = this.getClassifierChannelsOut();\n const classifierWeightSize = (cOut * cIn) + cOut;\n\n const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize);\n const classifierWeights = weights.slice(weights.length - classifierWeightSize);\n\n this.faceFeatureExtractor.extractWeights(featureExtractorWeights);\n return this.extractClassifierParams(classifierWeights);\n }\n}\n", "export const FACE_EXPRESSION_LABELS = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised'];\n\nexport class FaceExpressions {\n public neutral: number\n\n public happy: number\n\n public sad: number\n\n public angry: number\n\n public fearful: number\n\n public disgusted: number\n\n public surprised: number\n\n constructor(probabilities: number[] | Float32Array) {\n if (probabilities.length !== 7) {\n throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`);\n }\n\n FACE_EXPRESSION_LABELS.forEach((expression, idx) => {\n this[expression] = probabilities[idx];\n });\n }\n\n asSortedArray() {\n return FACE_EXPRESSION_LABELS\n .map((expression) => ({ expression, probability: this[expression] as number }))\n .sort((e0, e1) => e1.probability - e0.probability);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';\nimport { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceProcessor } from '../faceProcessor/FaceProcessor';\nimport { FaceExpressions } from './FaceExpressions';\n\nexport class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {\n constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {\n super('FaceExpressionNet', faceFeatureExtractor);\n }\n\n public forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D {\n return tf.tidy(() => tf.softmax(this.runNet(input)));\n }\n\n public async forward(input: TNetInput): Promise<tf.Tensor2D> {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async predictExpressions(input: TNetInput) {\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput);\n const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async (t) => {\n const data = t.dataSync();\n t.dispose();\n return data;\n }));\n out.dispose();\n\n const predictionsByBatch = probabilitesByBatch\n 
.map((probabilites) => new FaceExpressions(probabilites as Float32Array));\n\n return netInput.isBatchInput\n ? predictionsByBatch\n : predictionsByBatch[0];\n }\n\n protected getDefaultModelName(): string {\n return 'face_expression_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 256;\n }\n\n protected getClassifierChannelsOut(): number {\n return 7;\n }\n}\n", "import { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\n\nexport type WithFaceExpressions = TSource & { expressions: FaceExpressions }\n\nexport function isWithFaceExpressions(obj: any): obj is WithFaceExpressions<{}> {\n return obj.expressions instanceof FaceExpressions;\n}\n\nexport function extendWithFaceExpressions(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions {\n const extension = { expressions };\n return { ...sourceObj, ...extension };\n}\n", "import { IPoint, Point } from '../classes/index';\nimport { FaceExpressions } from '../faceExpressionNet/index';\nimport { isWithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';\nimport { round } from '../utils/index';\nimport { DrawTextField } from './DrawTextField';\n\nexport type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>\n\nexport function drawFaceExpressions(\n canvasArg: string | HTMLCanvasElement,\n faceExpressions: DrawFaceExpressionsInput | Array,\n minConfidence = 0.1,\n textFieldAnchor?: IPoint,\n) {\n const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions];\n\n faceExpressionsArray.forEach((e) => {\n // eslint-disable-next-line no-nested-ternary\n const expr = e instanceof FaceExpressions\n ? e\n : (isWithFaceExpressions(e) ? e.expressions : undefined);\n if (!expr) {\n throw new Error('drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof');\n }\n\n const sorted = expr.asSortedArray();\n const resultsToDisplay = sorted.filter((exprLocal) => exprLocal.probability > minConfidence);\n\n const anchor = isWithFaceDetection(e)\n ? 
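A quick usage sketch, assuming the `FaceExpressions` class defined above is in scope, showing how softmax probabilities map onto named expressions and how `asSortedArray` ranks them:

```ts
// Probabilities follow FACE_EXPRESSION_LABELS order:
// neutral, happy, sad, angry, fearful, disgusted, surprised.
const expressions = new FaceExpressions([0.05, 0.8, 0.02, 0.03, 0.01, 0.01, 0.08]);

const [best] = expressions.asSortedArray();
console.log(best); // { expression: 'happy', probability: 0.8 }
console.log(expressions.happy); // 0.8
```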
e.detection.box.bottomLeft\n : (textFieldAnchor || new Point(0, 0));\n\n const drawTextField = new DrawTextField(\n resultsToDisplay.map((exprLocal) => `${exprLocal.expression} (${round(exprLocal.probability)})`),\n anchor,\n );\n drawTextField.draw(canvasArg);\n });\n}\n", "import { FaceDetection } from '../classes/FaceDetection';\nimport { FaceLandmarks } from '../classes/FaceLandmarks';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { isWithFaceDetection, WithFaceDetection } from './WithFaceDetection';\n\nexport type WithFaceLandmarks<\n TSource extends WithFaceDetection<{}>,\n TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 > = TSource & {\n landmarks: TFaceLandmarks,\n unshiftedLandmarks: TFaceLandmarks,\n alignedRect: FaceDetection,\n angle: { roll: number | undefined, pitch: number | undefined, yaw: number | undefined },\n }\n\nexport function isWithFaceLandmarks(obj: any): obj is WithFaceLandmarks, FaceLandmarks> {\n return isWithFaceDetection(obj)\n // eslint-disable-next-line dot-notation\n && obj['landmarks'] instanceof FaceLandmarks\n // eslint-disable-next-line dot-notation\n && obj['unshiftedLandmarks'] instanceof FaceLandmarks\n // eslint-disable-next-line dot-notation\n && obj['alignedRect'] instanceof FaceDetection;\n}\n\nfunction calculateFaceAngle(mesh) {\n // returns the angle in the plane (in radians) between the positive x-axis and the ray from (0,0) to the point (x,y)\n const radians = (a1, a2, b1, b2) => (Math.atan2(b2 - a2, b1 - a1) % Math.PI);\n // convert radians to degrees\n // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars\n const degrees = (theta) => (theta * 180) / Math.PI;\n\n const angle = { roll: undefined, pitch: undefined, yaw: undefined };\n\n if (!mesh || !mesh._positions || mesh._positions.length !== 68) return angle;\n const pt = mesh._positions;\n\n // values are in radians in range of -pi/2 to pi/2 which is -90 to +90 degrees\n // value of 0 means center\n\n // roll is face lean from left to right\n // comparing x,y of outside corners of leftEye and rightEye\n angle.roll = -radians(pt[36]._x, pt[36]._y, pt[45]._x, pt[45]._y);\n\n // pitch is face turn from left right\n // comparing x distance of top of nose to left and right edge of face\n // precision is lacking since coordinates are not precise enough\n angle.pitch = radians(0, Math.abs(pt[0]._x - pt[30]._x) / pt[30]._x, Math.PI, Math.abs(pt[16]._x - pt[30]._x) / pt[30]._x);\n\n // yaw is face move from up to down\n // comparing size of the box around the face with top and bottom of detected landmarks\n // silly hack, but this gives us face compression on y-axis\n // e.g., tilting head up hides the forehead that doesn't have any landmarks so ratio drops\n const bottom = pt.reduce((prev, cur) => (prev < cur._y ? prev : cur._y), +Infinity);\n const top = pt.reduce((prev, cur) => (prev > cur._y ? 
prev : cur._y), -Infinity);\n angle.yaw = Math.PI * (mesh._imgDims._height / (top - bottom) / 1.40 - 1);\n\n return angle;\n}\n\nexport function extendWithFaceLandmarks<\n TSource extends WithFaceDetection<{}>,\n TFaceLandmarks extends FaceLandmarks = FaceLandmarks68 >(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks {\n const { box: shift } = sourceObj.detection;\n const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y);\n\n const rect = landmarks.align();\n const { imageDims } = sourceObj.detection;\n const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims);\n const angle = calculateFaceAngle(unshiftedLandmarks);\n\n const extension = {\n landmarks,\n unshiftedLandmarks,\n alignedRect,\n angle,\n };\n\n return { ...sourceObj, ...extension };\n}\n", "/* eslint-disable max-classes-per-file */\nimport { IPoint } from '../classes/index';\nimport { FaceLandmarks } from '../classes/FaceLandmarks';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { getContext2dOrThrow } from '../dom/getContext2dOrThrow';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { drawContour } from './drawContour';\n\nexport interface IDrawFaceLandmarksOptions {\n drawLines?: boolean\n drawPoints?: boolean\n lineWidth?: number\n pointSize?: number\n lineColor?: string\n pointColor?: string\n}\n\nexport class DrawFaceLandmarksOptions {\n public drawLines: boolean\n\n public drawPoints: boolean\n\n public lineWidth: number\n\n public pointSize: number\n\n public lineColor: string\n\n public pointColor: string\n\n constructor(options: IDrawFaceLandmarksOptions = {}) {\n const {\n drawLines = true, drawPoints = true, lineWidth, lineColor, pointSize, pointColor,\n } = options;\n this.drawLines = drawLines;\n this.drawPoints = drawPoints;\n this.lineWidth = lineWidth || 1;\n this.pointSize = pointSize || 2;\n this.lineColor = lineColor || 'rgba(0, 255, 255, 1)';\n this.pointColor = pointColor || 'rgba(255, 0, 255, 1)';\n }\n}\n\nexport class DrawFaceLandmarks {\n public faceLandmarks: FaceLandmarks\n\n public options: DrawFaceLandmarksOptions\n\n constructor(\n faceLandmarks: FaceLandmarks,\n options: IDrawFaceLandmarksOptions = {},\n ) {\n this.faceLandmarks = faceLandmarks;\n this.options = new DrawFaceLandmarksOptions(options);\n }\n\n draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D) {\n const ctx = getContext2dOrThrow(canvasArg);\n\n const {\n drawLines, drawPoints, lineWidth, lineColor, pointSize, pointColor,\n } = this.options;\n\n if (drawLines && this.faceLandmarks instanceof FaceLandmarks68) {\n ctx.strokeStyle = lineColor;\n ctx.lineWidth = lineWidth;\n drawContour(ctx, this.faceLandmarks.getJawOutline());\n drawContour(ctx, this.faceLandmarks.getLeftEyeBrow());\n drawContour(ctx, this.faceLandmarks.getRightEyeBrow());\n drawContour(ctx, this.faceLandmarks.getNose());\n drawContour(ctx, this.faceLandmarks.getLeftEye(), true);\n drawContour(ctx, this.faceLandmarks.getRightEye(), true);\n drawContour(ctx, this.faceLandmarks.getMouth(), true);\n }\n\n if (drawPoints) {\n ctx.strokeStyle = pointColor;\n ctx.fillStyle = pointColor;\n\n const drawPoint = (pt: IPoint) => {\n ctx.beginPath();\n ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI);\n ctx.fill();\n };\n this.faceLandmarks.positions.forEach(drawPoint);\n }\n }\n}\n\nexport type DrawFaceLandmarksInput = FaceLandmarks | 
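The roll/pitch/yaw values produced by `calculateFaceAngle` above are radians, roughly in the range -pi/2 to pi/2; the source even defines (but does not use) a `degrees` helper. A sketch of presenting them in degrees (the `toDegrees` name is illustrative):

```ts
// Same conversion as the unused `degrees` lambda in calculateFaceAngle;
// undefined is passed through for meshes that could not be measured.
const toDegrees = (theta: number | undefined) => (theta === undefined ? undefined : (theta * 180) / Math.PI);

// a roll of pi/12 radians reads as a 15 degree head tilt
console.log(toDegrees(Math.PI / 12)); // 15
```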
WithFaceLandmarks>\n\nexport function drawFaceLandmarks(\n canvasArg: string | HTMLCanvasElement,\n faceLandmarks: DrawFaceLandmarksInput | Array,\n) {\n const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks];\n faceLandmarksArray.forEach((f) => {\n // eslint-disable-next-line no-nested-ternary\n const landmarks = f instanceof FaceLandmarks\n ? f\n : (isWithFaceLandmarks(f) ? f.landmarks : undefined);\n if (!landmarks) {\n throw new Error('drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof');\n }\n\n new DrawFaceLandmarks(landmarks).draw(canvasArg);\n });\n}\n", "import { extractConvParamsFactory, extractSeparableConvParamsFactory, extractWeightsFactory } from '../common/index';\nimport { ExtractWeightsFunction, ParamMapping } from '../common/types';\nimport { range } from '../utils/index';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n function extractReductionBlockParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ReductionBlockParams {\n const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`);\n const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`);\n\n return { separable_conv0, separable_conv1, expansion_conv };\n }\n\n function extractMainBlockParams(channels: number, mappedPrefix: string): MainBlockParams {\n const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`);\n const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`);\n\n return { separable_conv0, separable_conv1, separable_conv2 };\n }\n\n return {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n };\n}\n\nexport function extractParams(weights: Float32Array, numMainBlocks: number): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const entry_flow_conv_in = extractConvParams(3, 32, 3, 'entry_flow/conv_in');\n const entry_flow_reduction_block_0 = extractReductionBlockParams(32, 64, 'entry_flow/reduction_block_0');\n const entry_flow_reduction_block_1 = extractReductionBlockParams(64, 128, 'entry_flow/reduction_block_1');\n\n const entry_flow = {\n conv_in: entry_flow_conv_in,\n reduction_block_0: entry_flow_reduction_block_0,\n reduction_block_1: entry_flow_reduction_block_1,\n };\n\n const middle_flow = {};\n range(numMainBlocks, 0, 1).forEach((idx) => {\n middle_flow[`main_block_${idx}`] = extractMainBlockParams(128, `middle_flow/main_block_${idx}`);\n });\n\n const exit_flow_reduction_block = 
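A usage sketch for the drawing classes above; the canvas id and the `landmarks` value are hypothetical (in practice `landmarks` comes from `detectLandmarks`):

```ts
declare const landmarks: FaceLandmarks68; // e.g. from FaceLandmark68Net.detectLandmarks()

const options = new DrawFaceLandmarksOptions({
  drawLines: true,
  drawPoints: true,
  lineWidth: 2,
  lineColor: 'rgba(0, 255, 0, 1)',
});

// 'overlay' is a hypothetical canvas element id
new DrawFaceLandmarks(landmarks, options).draw('overlay');
```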
extractReductionBlockParams(128, 256, 'exit_flow/reduction_block');\n const exit_flow_separable_conv = extractSeparableConvParams(256, 512, 'exit_flow/separable_conv');\n\n const exit_flow = {\n reduction_block: exit_flow_reduction_block,\n separable_conv: exit_flow_separable_conv,\n };\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { entry_flow, middle_flow, exit_flow },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, loadSeparableConvParamsFactory, ParamMapping } from '../common/index';\nimport { loadConvParamsFactory } from '../common/loadConvParamsFactory';\nimport { range } from '../utils/index';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction loadParamsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n const extractConvParams = loadConvParamsFactory(extractWeightEntry);\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n\n function extractReductionBlockParams(mappedPrefix: string): ReductionBlockParams {\n const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`);\n const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`);\n\n return { separable_conv0, separable_conv1, expansion_conv };\n }\n\n function extractMainBlockParams(mappedPrefix: string): MainBlockParams {\n const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`);\n const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`);\n const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`);\n\n return { separable_conv0, separable_conv1, separable_conv2 };\n }\n\n return {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n numMainBlocks: number,\n): { params: TinyXceptionParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvParams,\n extractSeparableConvParams,\n extractReductionBlockParams,\n extractMainBlockParams,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const entry_flow_conv_in = extractConvParams('entry_flow/conv_in');\n const entry_flow_reduction_block_0 = extractReductionBlockParams('entry_flow/reduction_block_0');\n const entry_flow_reduction_block_1 = extractReductionBlockParams('entry_flow/reduction_block_1');\n\n const entry_flow = {\n conv_in: entry_flow_conv_in,\n reduction_block_0: entry_flow_reduction_block_0,\n reduction_block_1: entry_flow_reduction_block_1,\n };\n\n const middle_flow = {};\n range(numMainBlocks, 0, 1).forEach((idx) => {\n middle_flow[`main_block_${idx}`] = extractMainBlockParams(`middle_flow/main_block_${idx}`);\n });\n\n const exit_flow_reduction_block = extractReductionBlockParams('exit_flow/reduction_block');\n const exit_flow_separable_conv = extractSeparableConvParams('exit_flow/separable_conv');\n\n const exit_flow = {\n reduction_block: exit_flow_reduction_block,\n separable_conv: exit_flow_separable_conv,\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { 
params: { entry_flow, middle_flow, exit_flow }, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, depthwiseSeparableConv } from '../common/index';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { range } from '../utils/index';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { MainBlockParams, ReductionBlockParams, TinyXceptionParams } from './types';\n\nfunction conv(x: tf.Tensor4D, params: ConvParams, stride: [number, number]): tf.Tensor4D {\n return tf.add(tf.conv2d(x, params.filters, stride, 'same'), params.bias);\n}\n\nfunction reductionBlock(x: tf.Tensor4D, params: ReductionBlockParams, isActivateInput: boolean = true): tf.Tensor4D {\n let out = isActivateInput ? tf.relu(x) : x;\n out = depthwiseSeparableConv(out, params.separable_conv0, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]);\n out = tf.maxPool(out, [3, 3], [2, 2], 'same');\n out = tf.add(out, conv(x, params.expansion_conv, [2, 2]));\n return out;\n}\n\nfunction mainBlock(x: tf.Tensor4D, params: MainBlockParams): tf.Tensor4D {\n let out = depthwiseSeparableConv(tf.relu(x), params.separable_conv0, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]);\n out = depthwiseSeparableConv(tf.relu(out), params.separable_conv2, [1, 1]);\n out = tf.add(out, x);\n return out;\n}\n\nexport class TinyXception extends NeuralNetwork {\n private _numMainBlocks: number\n\n constructor(numMainBlocks: number) {\n super('TinyXception');\n this._numMainBlocks = numMainBlocks;\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n if (!params) {\n throw new Error('TinyXception - load model before inference');\n }\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2]));\n out = reductionBlock(out, params.entry_flow.reduction_block_0, false);\n out = reductionBlock(out, params.entry_flow.reduction_block_1);\n range(this._numMainBlocks, 0, 1).forEach((idx) => {\n out = mainBlock(out, params.middle_flow[`main_block_${idx}`]);\n });\n out = reductionBlock(out, params.exit_flow.reduction_block);\n out = tf.relu(depthwiseSeparableConv(out, params.exit_flow.separable_conv, [1, 1]));\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'tiny_xception_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap, this._numMainBlocks);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights, this._numMainBlocks);\n }\n}\n", "import { extractFCParamsFactory, extractWeightsFactory, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const extractFCParams = 
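The only architectural knob on `TinyXception` is `numMainBlocks`, which sets how many 128-channel main blocks the middle flow repeats; both weight-extraction paths above iterate `range(numMainBlocks, 0, 1)` accordingly. A sketch of what that implies for the variant `AgeGenderNet` constructs:

```ts
// AgeGenderNet builds its backbone as new TinyXception(2), so the extracted
// parameter tree looks like:
//   entry_flow:  conv_in (3->32), reduction_block_0 (32->64), reduction_block_1 (64->128)
//   middle_flow: main_block_0, main_block_1 (128 channels each)
//   exit_flow:   reduction_block (128->256), separable_conv (256->512)
const backbone = new TinyXception(2);
```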
extractFCParamsFactory(extractWeights, paramMappings);\n\n const age = extractFCParams(512, 1, 'fc/age');\n const gender = extractFCParams(512, 2, 'fc/gender');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { fc: { age, gender } },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, FCParams, ParamMapping } from '../common/index';\nimport { NetParams } from './types';\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractFcParams(prefix: string): FCParams {\n const weights = extractWeightEntry(`${prefix}/weights`, 2);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n return { weights, bias };\n }\n\n const params = {\n fc: {\n age: extractFcParams('fc/age'),\n gender: extractFcParams('fc/gender'),\n },\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FCParams } from '../common/index';\n\n// eslint-disable-next-line no-shadow\nexport enum Gender {\n // eslint-disable-next-line no-unused-vars\n FEMALE = 'female',\n // eslint-disable-next-line no-unused-vars\n MALE = 'male'\n}\n\nexport type AgeAndGenderPrediction = {\n age: number\n gender: Gender\n genderProbability: number\n}\n\nexport type NetOutput = { age: tf.Tensor1D, gender: tf.Tensor2D }\n\nexport type NetParams = {\n fc: {\n age: FCParams\n gender: FCParams\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { fullyConnectedLayer } from '../common/fullyConnectedLayer';\nimport { seperateWeightMaps } from '../faceProcessor/util';\nimport { TinyXception } from '../xception/TinyXception';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { AgeAndGenderPrediction, Gender, NetOutput, NetParams } from './types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\n\nexport class AgeGenderNet extends NeuralNetwork {\n private _faceFeatureExtractor: TinyXception\n\n constructor(faceFeatureExtractor: TinyXception = new TinyXception(2)) {\n super('AgeGenderNet');\n this._faceFeatureExtractor = faceFeatureExtractor;\n }\n\n public get faceFeatureExtractor(): TinyXception {\n return this._faceFeatureExtractor;\n }\n\n public runNet(input: NetInput | tf.Tensor4D): NetOutput {\n const { params } = this;\n\n if (!params) {\n throw new Error(`${this._name} - load model before inference`);\n }\n\n return tf.tidy(() => {\n const bottleneckFeatures = input instanceof NetInput\n ? 
this.faceFeatureExtractor.forwardInput(input)\n : input;\n\n const pooled = tf.avgPool(bottleneckFeatures, [7, 7], [2, 2], 'valid').as2D(bottleneckFeatures.shape[0], -1);\n const age = fullyConnectedLayer(pooled, params.fc.age).as1D();\n const gender = fullyConnectedLayer(pooled, params.fc.gender);\n return { age, gender };\n });\n }\n\n public forwardInput(input: NetInput | tf.Tensor4D): NetOutput {\n return tf.tidy(() => {\n const { age, gender } = this.runNet(input);\n return { age, gender: tf.softmax(gender) };\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async predictAgeAndGender(input: TNetInput): Promise {\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput);\n\n const ages = tf.unstack(out.age);\n const genders = tf.unstack(out.gender);\n const ageAndGenderTensors = ages.map((ageTensor, i) => ({\n ageTensor,\n genderTensor: genders[i],\n }));\n\n const predictionsByBatch = await Promise.all(\n ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => {\n const age = (ageTensor.dataSync())[0];\n const probMale = (genderTensor.dataSync())[0];\n const isMale = probMale > 0.5;\n const gender = isMale ? Gender.MALE : Gender.FEMALE;\n const genderProbability = isMale ? probMale : (1 - probMale);\n\n ageTensor.dispose();\n genderTensor.dispose();\n return { age, gender, genderProbability };\n }),\n );\n out.age.dispose();\n out.gender.dispose();\n\n return netInput.isBatchInput ? predictionsByBatch as AgeAndGenderPrediction[] : predictionsByBatch[0] as AgeAndGenderPrediction;\n }\n\n protected getDefaultModelName(): string {\n return 'age_gender_model';\n }\n\n public dispose(throwOnRedispose: boolean = true) {\n this.faceFeatureExtractor.dispose(throwOnRedispose);\n super.dispose(throwOnRedispose);\n }\n\n public loadClassifierParams(weights: Float32Array) {\n const { params, paramMappings } = this.extractClassifierParams(weights);\n this._params = params;\n this._paramMappings = paramMappings;\n }\n\n public extractClassifierParams(weights: Float32Array) {\n return extractParams(weights);\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap);\n\n this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap);\n\n return extractParamsFromWeightMap(classifierMap);\n }\n\n protected extractParams(weights: Float32Array) {\n const classifierWeightSize = (512 * 1 + 1) + (512 * 2 + 2);\n\n const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize);\n const classifierWeights = weights.slice(weights.length - classifierWeightSize);\n\n this.faceFeatureExtractor.extractWeights(featureExtractorWeights);\n return this.extractClassifierParams(classifierWeights);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { IDimensions, Point } from '../classes/index';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { FaceFeatureExtractorParams, TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceProcessor } from '../faceProcessor/FaceProcessor';\nimport { isEven } from '../utils/index';\n\nexport abstract class FaceLandmark68NetBase<\n TExtractorParams extends FaceFeatureExtractorParams | TinyFaceFeatureExtractorParams\n>\n extends FaceProcessor {\n public postProcess(output: tf.Tensor2D, inputSize: number, 
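Two constants in `AgeGenderNet` above are worth unpacking: the hard-coded classifier tail size in `extractParams`, and the gender decision rule in `predictAgeAndGender`. A self-contained sketch (the `decodeGender` helper is illustrative):

```ts
// Classifier tail assumed by extractParams above: a 512->1 age FC plus a
// 512->2 gender FC, each with a bias vector.
const classifierWeightSize = (512 * 1 + 1) + (512 * 2 + 2); // 1539 values

// Gender decoding as in predictAgeAndGender: softmax index 0 is P(male).
function decodeGender(probMale: number) {
  const isMale = probMale > 0.5;
  return {
    gender: isMale ? 'male' : 'female',
    genderProbability: isMale ? probMale : 1 - probMale,
  };
}

console.log(classifierWeightSize); // 1539
console.log(decodeGender(0.2)); // { gender: 'female', genderProbability: 0.8 }
```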
originalDimensions: IDimensions[]): tf.Tensor2D {\n const inputDimensions = originalDimensions.map(({ width, height }) => {\n const scale = inputSize / Math.max(height, width);\n return {\n width: width * scale,\n height: height * scale,\n };\n });\n\n const batchSize = inputDimensions.length;\n\n return tf.tidy(() => {\n const createInterleavedTensor = (fillX: number, fillY: number) => tf.stack([tf.fill([68], fillX, 'float32'), tf.fill([68], fillY, 'float32')], 1).as2D(1, 136).as1D();\n\n // eslint-disable-next-line no-unused-vars\n const getPadding = (batchIdx: number, cond: (w: number, h: number) => boolean): number => {\n const { width, height } = inputDimensions[batchIdx];\n return cond(width, height) ? Math.abs(width - height) / 2 : 0;\n };\n\n const getPaddingX = (batchIdx: number) => getPadding(batchIdx, (w, h) => w < h);\n const getPaddingY = (batchIdx: number) => getPadding(batchIdx, (w, h) => h < w);\n\n const landmarkTensors = output\n .mul(tf.fill([batchSize, 136], inputSize, 'float32'))\n .sub(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(\n getPaddingX(batchIdx),\n getPaddingY(batchIdx),\n ))))\n .div(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(\n inputDimensions[batchIdx].width,\n inputDimensions[batchIdx].height,\n ))));\n\n return landmarkTensors as tf.Tensor2D;\n });\n }\n\n public forwardInput(input: NetInput): tf.Tensor2D {\n return tf.tidy(() => {\n const out = this.runNet(input);\n return this.postProcess(\n out,\n input.inputSize as number,\n input.inputDimensions.map(([height, width]) => ({ height, width })),\n );\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async detectLandmarks(input: TNetInput): Promise {\n const netInput = await toNetInput(input);\n const landmarkTensors = tf.tidy(\n () => tf.unstack(this.forwardInput(netInput)),\n );\n\n const landmarksForBatch = await Promise.all(landmarkTensors.map(\n async (landmarkTensor, batchIdx) => {\n const landmarksArray = Array.from(landmarkTensor.dataSync());\n const xCoords = landmarksArray.filter((_, i) => isEven(i));\n const yCoords = landmarksArray.filter((_, i) => !isEven(i));\n\n return new FaceLandmarks68(\n Array(68).fill(0).map((_, i) => new Point(xCoords[i] as number, yCoords[i] as number)),\n {\n height: netInput.getInputHeight(batchIdx),\n width: netInput.getInputWidth(batchIdx),\n },\n );\n },\n ));\n\n landmarkTensors.forEach((t) => t.dispose());\n\n return netInput.isBatchInput ? 
landmarksForBatch as FaceLandmarks68[] : landmarksForBatch[0] as FaceLandmarks68;\n }\n\n protected getClassifierChannelsOut(): number {\n return 136;\n }\n}\n", "import { FaceFeatureExtractor } from '../faceFeatureExtractor/FaceFeatureExtractor';\nimport { FaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceLandmark68NetBase } from './FaceLandmark68NetBase';\n\nexport class FaceLandmark68Net extends FaceLandmark68NetBase {\n constructor(faceFeatureExtractor: FaceFeatureExtractor = new FaceFeatureExtractor()) {\n super('FaceLandmark68Net', faceFeatureExtractor);\n }\n\n protected getDefaultModelName(): string {\n return 'face_landmark_68_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 256;\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, ParamMapping } from '../common/index';\nimport { loadParamsFactory } from './loadParamsFactory';\nimport { TinyFaceFeatureExtractorParams } from './types';\n\nexport function extractParamsFromWeightMapTiny(\n weightMap: tf.NamedTensorMap,\n): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractDenseBlock3Params,\n } = loadParamsFactory(weightMap, paramMappings);\n\n const params = {\n dense0: extractDenseBlock3Params('dense0', true),\n dense1: extractDenseBlock3Params('dense1'),\n dense2: extractDenseBlock3Params('dense2'),\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import { extractWeightsFactory, ParamMapping } from '../common/index';\nimport { extractorsFactory } from './extractorsFactory';\nimport { TinyFaceFeatureExtractorParams } from './types';\n\nexport function extractParamsTiny(weights: Float32Array): { params: TinyFaceFeatureExtractorParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const {\n extractDenseBlock3Params,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const dense0 = extractDenseBlock3Params(3, 32, 'dense0', true);\n const dense1 = extractDenseBlock3Params(32, 64, 'dense1');\n const dense2 = extractDenseBlock3Params(64, 128, 'dense2');\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n paramMappings,\n params: { dense0, dense1, dense2 },\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { denseBlock3 } from './denseBlock';\nimport { extractParamsFromWeightMapTiny } from './extractParamsFromWeightMapTiny';\nimport { extractParamsTiny } from './extractParamsTiny';\nimport { IFaceFeatureExtractor, TinyFaceFeatureExtractorParams } from './types';\n\nexport class TinyFaceFeatureExtractor extends NeuralNetwork implements IFaceFeatureExtractor {\n constructor() {\n super('TinyFaceFeatureExtractor');\n }\n\n public forwardInput(input: NetInput): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('TinyFaceFeatureExtractor - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, 
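`detectLandmarks` above turns each 136-value output row into 68 points by splitting even indices (x) from odd indices (y). A minimal standalone sketch of that decoding (the `toPoints` name is illustrative):

```ts
// The net emits x0, y0, x1, y1, ..., x67, y67 per face (136 values),
// already mapped to relative image coordinates by postProcess.
function toPoints(landmarksArray: number[]): { x: number; y: number }[] {
  const points: { x: number; y: number }[] = [];
  for (let i = 0; i < 68; i += 1) {
    points.push({ x: landmarksArray[2 * i], y: landmarksArray[2 * i + 1] });
  }
  return points;
}
```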
meanRgb).div(255) as tf.Tensor4D;\n\n let out = denseBlock3(normalized, params.dense0, true);\n out = denseBlock3(out, params.dense1);\n out = denseBlock3(out, params.dense2);\n out = tf.avgPool(out, [14, 14], [2, 2], 'valid');\n\n return out;\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n protected getDefaultModelName(): string {\n return 'face_feature_extractor_tiny_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMapTiny(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParamsTiny(weights);\n }\n}\n", "import { TinyFaceFeatureExtractor } from '../faceFeatureExtractor/TinyFaceFeatureExtractor';\nimport { TinyFaceFeatureExtractorParams } from '../faceFeatureExtractor/types';\nimport { FaceLandmark68NetBase } from './FaceLandmark68NetBase';\n\nexport class FaceLandmark68TinyNet extends FaceLandmark68NetBase {\n constructor(faceFeatureExtractor: TinyFaceFeatureExtractor = new TinyFaceFeatureExtractor()) {\n super('FaceLandmark68TinyNet', faceFeatureExtractor);\n }\n\n protected getDefaultModelName(): string {\n return 'face_landmark_68_tiny_model';\n }\n\n protected getClassifierChannelsIn(): number {\n return 128;\n }\n}\n", "import { FaceLandmark68Net } from './FaceLandmark68Net';\n\nexport * from './FaceLandmark68Net';\nexport * from './FaceLandmark68TinyNet';\nexport class FaceLandmarkNet extends FaceLandmark68Net {}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ScaleLayerParams } from './types';\n\nexport function scale(x: tf.Tensor4D, params: ScaleLayerParams): tf.Tensor4D {\n return tf.add(tf.mul(x, params.weights), params.biases);\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { scale } from './scaleLayer';\nimport { ConvLayerParams } from './types';\n\nfunction convLayer(\n x: tf.Tensor4D,\n params: ConvLayerParams,\n strides: [number, number],\n withRelu: boolean,\n padding: 'valid' | 'same' = 'same',\n): tf.Tensor4D {\n const { filters, bias } = params.conv;\n\n let out = tf.conv2d(x, filters, strides, padding);\n out = tf.add(out, bias);\n out = scale(out, params.scale);\n return withRelu ? 
tf.relu(out) : out;\n}\n\nexport function conv(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [1, 1], true);\n}\n\nexport function convNoRelu(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [1, 1], false);\n}\n\nexport function convDown(x: tf.Tensor4D, params: ConvLayerParams) {\n return convLayer(x, params, [2, 2], true, 'valid');\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, extractWeightsFactory, ExtractWeightsFunction, ParamMapping } from '../common/index';\nimport { isFloat } from '../utils/index';\nimport { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n function extractFilterValues(numFilterValues: number, numFilters: number, filterSize: number): tf.Tensor4D {\n const weights = extractWeights(numFilterValues);\n const depth = weights.length / (numFilters * filterSize * filterSize);\n\n if (isFloat(depth)) {\n throw new Error(`depth has to be an integer: ${depth}, weights.length: ${weights.length}, numFilters: ${numFilters}, filterSize: ${filterSize}`);\n }\n\n return tf.tidy(\n () => tf.transpose(\n tf.tensor4d(weights, [numFilters, depth, filterSize, filterSize]),\n [2, 3, 1, 0],\n ),\n );\n }\n\n function extractConvParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvParams {\n const filters = extractFilterValues(numFilterValues, numFilters, filterSize);\n const bias = tf.tensor1d(extractWeights(numFilters));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/bias` },\n );\n\n return { filters, bias };\n }\n\n function extractScaleLayerParams(numWeights: number, mappedPrefix: string): ScaleLayerParams {\n const weights = tf.tensor1d(extractWeights(numWeights));\n const biases = tf.tensor1d(extractWeights(numWeights));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/weights` },\n { paramPath: `${mappedPrefix}/biases` },\n );\n\n return {\n weights,\n biases,\n };\n }\n\n function extractConvLayerParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n ): ConvLayerParams {\n const conv = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`);\n const scale = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`);\n\n return { conv, scale };\n }\n\n function extractResidualLayerParams(\n numFilterValues: number,\n numFilters: number,\n filterSize: number,\n mappedPrefix: string,\n isDown: boolean = false,\n ): ResidualLayerParams {\n const conv1 = extractConvLayerParams((isDown ? 
0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`);\n const conv2 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`);\n\n return { conv1, conv2 };\n }\n\n return {\n extractConvLayerParams,\n extractResidualLayerParams,\n };\n}\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvLayerParams,\n extractResidualLayerParams,\n } = extractorsFactory(extractWeights, paramMappings);\n\n const conv32_down = extractConvLayerParams(4704, 32, 7, 'conv32_down');\n const conv32_1 = extractResidualLayerParams(9216, 32, 3, 'conv32_1');\n const conv32_2 = extractResidualLayerParams(9216, 32, 3, 'conv32_2');\n const conv32_3 = extractResidualLayerParams(9216, 32, 3, 'conv32_3');\n\n const conv64_down = extractResidualLayerParams(36864, 64, 3, 'conv64_down', true);\n const conv64_1 = extractResidualLayerParams(36864, 64, 3, 'conv64_1');\n const conv64_2 = extractResidualLayerParams(36864, 64, 3, 'conv64_2');\n const conv64_3 = extractResidualLayerParams(36864, 64, 3, 'conv64_3');\n\n const conv128_down = extractResidualLayerParams(147456, 128, 3, 'conv128_down', true);\n const conv128_1 = extractResidualLayerParams(147456, 128, 3, 'conv128_1');\n const conv128_2 = extractResidualLayerParams(147456, 128, 3, 'conv128_2');\n\n const conv256_down = extractResidualLayerParams(589824, 256, 3, 'conv256_down', true);\n const conv256_1 = extractResidualLayerParams(589824, 256, 3, 'conv256_1');\n const conv256_2 = extractResidualLayerParams(589824, 256, 3, 'conv256_2');\n const conv256_down_out = extractResidualLayerParams(589824, 256, 3, 'conv256_down_out');\n\n const fc = tf.tidy(\n () => tf.transpose(tf.tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0]),\n );\n paramMappings.push({ paramPath: 'fc' });\n\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n const params = {\n conv32_down,\n conv32_1,\n conv32_2,\n conv32_3,\n conv64_down,\n conv64_1,\n conv64_2,\n conv64_3,\n conv128_down,\n conv128_1,\n conv128_2,\n conv256_down,\n conv256_1,\n conv256_2,\n conv256_down_out,\n fc,\n };\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common/index';\nimport { isTensor2D } from '../utils/index';\nimport { ConvLayerParams, NetParams, ResidualLayerParams, ScaleLayerParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractScaleLayerParams(prefix: string): ScaleLayerParams {\n const weights = extractWeightEntry(`${prefix}/scale/weights`, 1);\n const biases = extractWeightEntry(`${prefix}/scale/biases`, 1);\n\n return { weights, biases };\n }\n\n function extractConvLayerParams(prefix: string): ConvLayerParams {\n const filters = extractWeightEntry(`${prefix}/conv/filters`, 4);\n const bias = extractWeightEntry(`${prefix}/conv/bias`, 1);\n const scale = extractScaleLayerParams(prefix);\n\n return { conv: { filters, bias }, scale };\n }\n\n function extractResidualLayerParams(prefix: string): ResidualLayerParams {\n return {\n conv1: extractConvLayerParams(`${prefix}/conv1`),\n 
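The `numFilterValues` literals passed around in `extractParams` above are not arbitrary: each one is `filterSize * filterSize * depth * numFilters`, and `extractFilterValues` recovers `depth` by division (throwing if it is not an integer). A quick check of the constants (`filterValueCount` is an illustrative helper):

```ts
// numFilterValues = filterSize * filterSize * depth * numFilters
const filterValueCount = (filterSize: number, depth: number, numFilters: number) => filterSize * filterSize * depth * numFilters;

console.log(filterValueCount(7, 3, 32)); // 4704   -> conv32_down (7x7 kernel, RGB in)
console.log(filterValueCount(3, 32, 32)); // 9216   -> conv32_1..conv32_3
console.log(filterValueCount(3, 64, 64)); // 36864  -> the conv64 blocks
console.log(filterValueCount(3, 256, 256)); // 589824 -> the conv256 blocks
```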
conv2: extractConvLayerParams(`${prefix}/conv2`),\n };\n }\n\n return {\n extractConvLayerParams,\n extractResidualLayerParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvLayerParams,\n extractResidualLayerParams,\n } = extractorsFactory(weightMap, paramMappings);\n\n const conv32_down = extractConvLayerParams('conv32_down');\n const conv32_1 = extractResidualLayerParams('conv32_1');\n const conv32_2 = extractResidualLayerParams('conv32_2');\n const conv32_3 = extractResidualLayerParams('conv32_3');\n\n const conv64_down = extractResidualLayerParams('conv64_down');\n const conv64_1 = extractResidualLayerParams('conv64_1');\n const conv64_2 = extractResidualLayerParams('conv64_2');\n const conv64_3 = extractResidualLayerParams('conv64_3');\n\n const conv128_down = extractResidualLayerParams('conv128_down');\n const conv128_1 = extractResidualLayerParams('conv128_1');\n const conv128_2 = extractResidualLayerParams('conv128_2');\n\n const conv256_down = extractResidualLayerParams('conv256_down');\n const conv256_1 = extractResidualLayerParams('conv256_1');\n const conv256_2 = extractResidualLayerParams('conv256_2');\n const conv256_down_out = extractResidualLayerParams('conv256_down_out');\n\n const { fc } = weightMap;\n paramMappings.push({ originalPath: 'fc', paramPath: 'fc' });\n\n if (!isTensor2D(fc)) {\n throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${fc}`);\n }\n\n const params = {\n conv32_down,\n conv32_1,\n conv32_2,\n conv32_3,\n conv64_down,\n conv64_1,\n conv64_2,\n conv64_3,\n conv128_down,\n conv128_1,\n conv128_2,\n conv256_down,\n conv256_1,\n conv256_2,\n conv256_down_out,\n fc,\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { conv, convDown, convNoRelu } from './convLayer';\nimport { ResidualLayerParams } from './types';\n\nexport function residual(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {\n let out = conv(x, params.conv1);\n out = convNoRelu(out, params.conv2);\n out = tf.add(out, x);\n out = tf.relu(out);\n return out;\n}\n\nexport function residualDown(x: tf.Tensor4D, params: ResidualLayerParams): tf.Tensor4D {\n let out = convDown(x, params.conv1);\n out = convNoRelu(out, params.conv2);\n\n let pooled = tf.avgPool(x, 2, 2, 'valid') as tf.Tensor4D;\n const zeros = tf.zeros(pooled.shape);\n const isPad = pooled.shape[3] !== out.shape[3];\n const isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2];\n\n if (isAdjustShape) {\n const padShapeX = [...out.shape] as [number, number, number, number];\n padShapeX[1] = 1;\n const zerosW = tf.zeros(padShapeX);\n out = tf.concat([out, zerosW], 1);\n\n const padShapeY = [...out.shape] as [number, number, number, number];\n padShapeY[2] = 1;\n const zerosH = tf.zeros(padShapeY);\n out = tf.concat([out, zerosH], 2);\n }\n\n pooled = isPad ? 
tf.concat([pooled, zeros], 3) : pooled;\n out = tf.add(pooled, out) as tf.Tensor4D;\n\n out = tf.relu(out);\n return out;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { normalize } from '../ops/index';\nimport { convDown } from './convLayer';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { residual, residualDown } from './residualLayer';\nimport { NetParams } from './types';\n\nexport class FaceRecognitionNet extends NeuralNetwork {\n constructor() {\n super('FaceRecognitionNet');\n }\n\n public forwardInput(input: NetInput): tf.Tensor2D {\n const { params } = this;\n\n if (!params) {\n throw new Error('FaceRecognitionNet - load model before inference');\n }\n\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(150, true), 'float32');\n\n const meanRgb = [122.782, 117.001, 104.298];\n const normalized = normalize(batchTensor, meanRgb).div(255) as tf.Tensor4D;\n\n let out = convDown(normalized, params.conv32_down);\n out = tf.maxPool(out, 3, 2, 'valid');\n\n out = residual(out, params.conv32_1);\n out = residual(out, params.conv32_2);\n out = residual(out, params.conv32_3);\n\n out = residualDown(out, params.conv64_down);\n out = residual(out, params.conv64_1);\n out = residual(out, params.conv64_2);\n out = residual(out, params.conv64_3);\n\n out = residualDown(out, params.conv128_down);\n out = residual(out, params.conv128_1);\n out = residual(out, params.conv128_2);\n\n out = residualDown(out, params.conv256_down);\n out = residual(out, params.conv256_1);\n out = residual(out, params.conv256_2);\n out = residualDown(out, params.conv256_down_out);\n\n const globalAvg = out.mean([1, 2]) as tf.Tensor2D;\n const fullyConnected = tf.matMul(globalAvg, params.fc);\n\n return fullyConnected;\n });\n }\n\n public async forward(input: TNetInput): Promise {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async computeFaceDescriptor(input: TNetInput): Promise {\n if (input?.shape?.some((dim) => dim <= 0)) return new Float32Array(128);\n const netInput = await toNetInput(input);\n const faceDescriptorTensors = tf.tidy(() => tf.unstack(this.forwardInput(netInput)));\n const faceDescriptorsForBatch = await Promise.all(faceDescriptorTensors.map((t) => t.data())) as Float32Array[];\n faceDescriptorTensors.forEach((t) => t.dispose());\n return netInput.isBatchInput ? 
faceDescriptorsForBatch : faceDescriptorsForBatch[0];\n }\n\n protected getDefaultModelName(): string {\n return 'face_recognition_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import { FaceRecognitionNet } from './FaceRecognitionNet';\n\nexport * from './FaceRecognitionNet';\n\nexport function createFaceRecognitionNet(weights: Float32Array) {\n const net = new FaceRecognitionNet();\n net.extractWeights(weights);\n return net;\n}\n", "export type WithFaceDescriptor = TSource & {\n descriptor: Float32Array\n}\n\nexport function extendWithFaceDescriptor<\n TSource\n>(\n sourceObj: TSource,\n descriptor: Float32Array,\n): WithFaceDescriptor {\n const extension = { descriptor };\n return { ...sourceObj, ...extension };\n}\n", "export type WithAge = TSource & {\n age: number\n}\n\nexport function isWithAge(obj: any): obj is WithAge<{}> {\n return typeof obj.age === 'number';\n}\n\nexport function extendWithAge<\n TSource\n>(\n sourceObj: TSource,\n age: number,\n): WithAge {\n const extension = { age };\n return { ...sourceObj, ...extension };\n}\n", "import { Gender } from '../ageGenderNet/types';\nimport { isValidProbablitiy } from '../utils/index';\n\nexport type WithGender = TSource & {\n gender: Gender\n genderProbability: number\n}\n\nexport function isWithGender(obj: any): obj is WithGender<{}> {\n return (obj.gender === Gender.MALE || obj.gender === Gender.FEMALE)\n && isValidProbablitiy(obj.genderProbability);\n}\n\nexport function extendWithGender<\n TSource\n>(\n sourceObj: TSource,\n gender: Gender,\n genderProbability: number,\n): WithGender {\n const extension = { gender, genderProbability };\n return { ...sourceObj, ...extension };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ExtractWeightsFunction, ParamMapping, ConvParams, extractWeightsFactory } from '../common/index';\nimport { MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n function extractDepthwiseConvParams(numChannels: number, mappedPrefix: string): MobileNetV1.DepthwiseConvParams {\n const filters = tf.tensor4d(extractWeights(3 * 3 * numChannels), [3, 3, numChannels, 1]);\n const batch_norm_scale = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_offset = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_mean = tf.tensor1d(extractWeights(numChannels));\n const batch_norm_variance = tf.tensor1d(extractWeights(numChannels));\n\n paramMappings.push(\n { paramPath: `${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/batch_norm_scale` },\n { paramPath: `${mappedPrefix}/batch_norm_offset` },\n { paramPath: `${mappedPrefix}/batch_norm_mean` },\n { paramPath: `${mappedPrefix}/batch_norm_variance` },\n );\n\n return {\n filters,\n batch_norm_scale,\n batch_norm_offset,\n batch_norm_mean,\n batch_norm_variance,\n };\n }\n\n function extractConvParams(\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n isPointwiseConv?: boolean,\n ): ConvParams {\n const filters = tf.tensor4d(\n extractWeights(channelsIn * channelsOut * filterSize * filterSize),\n [filterSize, filterSize, channelsIn, channelsOut],\n );\n const bias = tf.tensor1d(extractWeights(channelsOut));\n\n paramMappings.push(\n { paramPath: 
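`computeFaceDescriptor` above yields a 128-float embedding per face. Matching two faces is typically done with euclidean distance; a sketch, where the 0.6 threshold is the conventional value used with this model rather than something the code above enforces:

```ts
function euclideanDistance(a: Float32Array, b: Float32Array): number {
  let sum = 0;
  for (let i = 0; i < a.length; i += 1) sum += (a[i] - b[i]) ** 2;
  return Math.sqrt(sum);
}

declare const descriptorA: Float32Array; // from computeFaceDescriptor(imageA)
declare const descriptorB: Float32Array; // from computeFaceDescriptor(imageB)

const isSamePerson = euclideanDistance(descriptorA, descriptorB) < 0.6;
```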
`${mappedPrefix}/filters` },\n { paramPath: `${mappedPrefix}/${isPointwiseConv ? 'batch_norm_offset' : 'bias'}` },\n );\n\n return { filters, bias };\n }\n\n function extractPointwiseConvParams(\n channelsIn: number,\n channelsOut: number,\n filterSize: number,\n mappedPrefix: string,\n ): PointwiseConvParams {\n const {\n filters,\n bias,\n } = extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, true);\n\n return {\n filters,\n batch_norm_offset: bias,\n };\n }\n\n function extractConvPairParams(\n channelsIn: number,\n channelsOut: number,\n mappedPrefix: string,\n ): MobileNetV1.ConvPairParams {\n const depthwise_conv = extractDepthwiseConvParams(channelsIn, `${mappedPrefix}/depthwise_conv`);\n const pointwise_conv = extractPointwiseConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/pointwise_conv`);\n\n return { depthwise_conv, pointwise_conv };\n }\n\n function extractMobilenetV1Params(): MobileNetV1.Params {\n const conv_0 = extractPointwiseConvParams(3, 32, 3, 'mobilenetv1/conv_0');\n const conv_1 = extractConvPairParams(32, 64, 'mobilenetv1/conv_1');\n const conv_2 = extractConvPairParams(64, 128, 'mobilenetv1/conv_2');\n const conv_3 = extractConvPairParams(128, 128, 'mobilenetv1/conv_3');\n const conv_4 = extractConvPairParams(128, 256, 'mobilenetv1/conv_4');\n const conv_5 = extractConvPairParams(256, 256, 'mobilenetv1/conv_5');\n const conv_6 = extractConvPairParams(256, 512, 'mobilenetv1/conv_6');\n const conv_7 = extractConvPairParams(512, 512, 'mobilenetv1/conv_7');\n const conv_8 = extractConvPairParams(512, 512, 'mobilenetv1/conv_8');\n const conv_9 = extractConvPairParams(512, 512, 'mobilenetv1/conv_9');\n const conv_10 = extractConvPairParams(512, 512, 'mobilenetv1/conv_10');\n const conv_11 = extractConvPairParams(512, 512, 'mobilenetv1/conv_11');\n const conv_12 = extractConvPairParams(512, 1024, 'mobilenetv1/conv_12');\n const conv_13 = extractConvPairParams(1024, 1024, 'mobilenetv1/conv_13');\n return {\n conv_0,\n conv_1,\n conv_2,\n conv_3,\n conv_4,\n conv_5,\n conv_6,\n conv_7,\n conv_8,\n conv_9,\n conv_10,\n conv_11,\n conv_12,\n conv_13,\n };\n }\n\n function extractPredictionLayerParams(): PredictionLayerParams {\n const conv_0 = extractPointwiseConvParams(1024, 256, 1, 'prediction_layer/conv_0');\n const conv_1 = extractPointwiseConvParams(256, 512, 3, 'prediction_layer/conv_1');\n const conv_2 = extractPointwiseConvParams(512, 128, 1, 'prediction_layer/conv_2');\n const conv_3 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_3');\n const conv_4 = extractPointwiseConvParams(256, 128, 1, 'prediction_layer/conv_4');\n const conv_5 = extractPointwiseConvParams(128, 256, 3, 'prediction_layer/conv_5');\n const conv_6 = extractPointwiseConvParams(256, 64, 1, 'prediction_layer/conv_6');\n const conv_7 = extractPointwiseConvParams(64, 128, 3, 'prediction_layer/conv_7');\n const box_encoding_0_predictor = extractConvParams(512, 12, 1, 'prediction_layer/box_predictor_0/box_encoding_predictor');\n const class_predictor_0 = extractConvParams(512, 9, 1, 'prediction_layer/box_predictor_0/class_predictor');\n const box_encoding_1_predictor = extractConvParams(1024, 24, 1, 'prediction_layer/box_predictor_1/box_encoding_predictor');\n const class_predictor_1 = extractConvParams(1024, 18, 1, 'prediction_layer/box_predictor_1/class_predictor');\n const box_encoding_2_predictor = extractConvParams(512, 24, 1, 'prediction_layer/box_predictor_2/box_encoding_predictor');\n const class_predictor_2 = extractConvParams(512, 18, 1, 
'prediction_layer/box_predictor_2/class_predictor');\n const box_encoding_3_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_3/box_encoding_predictor');\n const class_predictor_3 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_3/class_predictor');\n const box_encoding_4_predictor = extractConvParams(256, 24, 1, 'prediction_layer/box_predictor_4/box_encoding_predictor');\n const class_predictor_4 = extractConvParams(256, 18, 1, 'prediction_layer/box_predictor_4/class_predictor');\n const box_encoding_5_predictor = extractConvParams(128, 24, 1, 'prediction_layer/box_predictor_5/box_encoding_predictor');\n const class_predictor_5 = extractConvParams(128, 18, 1, 'prediction_layer/box_predictor_5/class_predictor');\n\n const box_predictor_0 = {\n box_encoding_predictor: box_encoding_0_predictor,\n class_predictor: class_predictor_0,\n };\n const box_predictor_1 = {\n box_encoding_predictor: box_encoding_1_predictor,\n class_predictor: class_predictor_1,\n };\n const box_predictor_2 = {\n box_encoding_predictor: box_encoding_2_predictor,\n class_predictor: class_predictor_2,\n };\n const box_predictor_3 = {\n box_encoding_predictor: box_encoding_3_predictor,\n class_predictor: class_predictor_3,\n };\n const box_predictor_4 = {\n box_encoding_predictor: box_encoding_4_predictor,\n class_predictor: class_predictor_4,\n };\n const box_predictor_5 = {\n box_encoding_predictor: box_encoding_5_predictor,\n class_predictor: class_predictor_5,\n };\n return {\n conv_0,\n conv_1,\n conv_2,\n conv_3,\n conv_4,\n conv_5,\n conv_6,\n conv_7,\n box_predictor_0,\n box_predictor_1,\n box_predictor_2,\n box_predictor_3,\n box_predictor_4,\n box_predictor_5,\n };\n }\n\n return {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n };\n}\n\nexport function extractParams(weights: Float32Array): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n const {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n } = extractorsFactory(extractWeights, paramMappings);\n const mobilenetv1 = extractMobilenetV1Params();\n const prediction_layer = extractPredictionLayerParams();\n const extra_dim = tf.tensor3d(\n extractWeights(5118 * 4),\n [1, 5118, 4],\n );\n const output_layer = {\n extra_dim,\n };\n paramMappings.push({ paramPath: 'output_layer/extra_dim' });\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`);\n }\n\n return {\n params: {\n mobilenetv1,\n prediction_layer,\n output_layer,\n },\n paramMappings,\n };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams, disposeUnusedWeightTensors, extractWeightEntryFactory, ParamMapping } from '../common/index';\nimport { isTensor3D } from '../utils/index';\nimport { BoxPredictionParams, MobileNetV1, NetParams, PointwiseConvParams, PredictionLayerParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractPointwiseConvParams(prefix: string, idx: number, mappedPrefix: string): PointwiseConvParams {\n const filters = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/weights`, 4, `${mappedPrefix}/filters`);\n const batch_norm_offset = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/convolution_bn_offset`, 1, 
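As in the other extractors, `extractConvParams` above consumes `channelsIn * channelsOut * filterSize * filterSize` filter values followed by `channelsOut` bias values from the flat array. Two of the constants checked by hand (`convParamCount` is an illustrative helper):

```ts
const convParamCount = (cIn: number, cOut: number, k: number) => cIn * cOut * k * k + cOut;

console.log(convParamCount(3, 32, 3)); // 896  -> mobilenetv1/conv_0
console.log(convParamCount(512, 12, 1)); // 6156 -> box_predictor_0 box encodings
```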
`${mappedPrefix}/batch_norm_offset`);\n return { filters, batch_norm_offset };\n }\n\n function extractConvPairParams(idx: number): MobileNetV1.ConvPairParams {\n const mappedPrefix = `mobilenetv1/conv_${idx}`;\n const prefixDepthwiseConv = `MobilenetV1/Conv2d_${idx}_depthwise`;\n const mappedPrefixDepthwiseConv = `${mappedPrefix}/depthwise_conv`;\n const mappedPrefixPointwiseConv = `${mappedPrefix}/pointwise_conv`;\n\n const filters = extractWeightEntry(`${prefixDepthwiseConv}/depthwise_weights`, 4, `${mappedPrefixDepthwiseConv}/filters`);\n const batch_norm_scale = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/gamma`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_scale`);\n const batch_norm_offset = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/beta`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_offset`);\n const batch_norm_mean = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_mean`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_mean`);\n const batch_norm_variance = extractWeightEntry(`${prefixDepthwiseConv}/BatchNorm/moving_variance`, 1, `${mappedPrefixDepthwiseConv}/batch_norm_variance`);\n\n return {\n depthwise_conv: {\n filters,\n batch_norm_scale,\n batch_norm_offset,\n batch_norm_mean,\n batch_norm_variance,\n },\n pointwise_conv: extractPointwiseConvParams('MobilenetV1', idx, mappedPrefixPointwiseConv),\n };\n }\n\n function extractMobilenetV1Params(): MobileNetV1.Params {\n return {\n conv_0: extractPointwiseConvParams('MobilenetV1', 0, 'mobilenetv1/conv_0'),\n conv_1: extractConvPairParams(1),\n conv_2: extractConvPairParams(2),\n conv_3: extractConvPairParams(3),\n conv_4: extractConvPairParams(4),\n conv_5: extractConvPairParams(5),\n conv_6: extractConvPairParams(6),\n conv_7: extractConvPairParams(7),\n conv_8: extractConvPairParams(8),\n conv_9: extractConvPairParams(9),\n conv_10: extractConvPairParams(10),\n conv_11: extractConvPairParams(11),\n conv_12: extractConvPairParams(12),\n conv_13: extractConvPairParams(13),\n };\n }\n\n function extractConvParams(prefix: string, mappedPrefix: string): ConvParams {\n const filters = extractWeightEntry(`${prefix}/weights`, 4, `${mappedPrefix}/filters`);\n const bias = extractWeightEntry(`${prefix}/biases`, 1, `${mappedPrefix}/bias`);\n return { filters, bias };\n }\n\n function extractBoxPredictorParams(idx: number): BoxPredictionParams {\n const box_encoding_predictor = extractConvParams(\n `Prediction/BoxPredictor_${idx}/BoxEncodingPredictor`,\n `prediction_layer/box_predictor_${idx}/box_encoding_predictor`,\n );\n const class_predictor = extractConvParams(\n `Prediction/BoxPredictor_${idx}/ClassPredictor`,\n `prediction_layer/box_predictor_${idx}/class_predictor`,\n );\n return { box_encoding_predictor, class_predictor };\n }\n\n function extractPredictionLayerParams(): PredictionLayerParams {\n return {\n conv_0: extractPointwiseConvParams('Prediction', 0, 'prediction_layer/conv_0'),\n conv_1: extractPointwiseConvParams('Prediction', 1, 'prediction_layer/conv_1'),\n conv_2: extractPointwiseConvParams('Prediction', 2, 'prediction_layer/conv_2'),\n conv_3: extractPointwiseConvParams('Prediction', 3, 'prediction_layer/conv_3'),\n conv_4: extractPointwiseConvParams('Prediction', 4, 'prediction_layer/conv_4'),\n conv_5: extractPointwiseConvParams('Prediction', 5, 'prediction_layer/conv_5'),\n conv_6: extractPointwiseConvParams('Prediction', 6, 'prediction_layer/conv_6'),\n conv_7: extractPointwiseConvParams('Prediction', 7, 'prediction_layer/conv_7'),\n box_predictor_0: extractBoxPredictorParams(0),\n 
box_predictor_1: extractBoxPredictorParams(1),\n box_predictor_2: extractBoxPredictorParams(2),\n box_predictor_3: extractBoxPredictorParams(3),\n box_predictor_4: extractBoxPredictorParams(4),\n box_predictor_5: extractBoxPredictorParams(5),\n };\n }\n\n return {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n): { params: NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n const {\n extractMobilenetV1Params,\n extractPredictionLayerParams,\n } = extractorsFactory(weightMap, paramMappings);\n const extra_dim = weightMap['Output/extra_dim'];\n paramMappings.push({ originalPath: 'Output/extra_dim', paramPath: 'output_layer/extra_dim' });\n if (!isTensor3D(extra_dim)) {\n throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${extra_dim}`);\n }\n\n const params = {\n mobilenetv1: extractMobilenetV1Params(),\n prediction_layer: extractPredictionLayerParams(),\n output_layer: {\n extra_dim,\n },\n };\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { PointwiseConvParams } from './types';\n\nexport function pointwiseConvLayer(x: tf.Tensor4D, params: PointwiseConvParams, strides: [number, number]) {\n return tf.tidy(() => {\n let out = tf.conv2d(x, params.filters, strides, 'same');\n out = tf.add(out, params.batch_norm_offset);\n return tf.clipByValue(out, 0, 6);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { pointwiseConvLayer } from './pointwiseConvLayer';\nimport { MobileNetV1 } from './types';\n\nconst epsilon = 0.0010000000474974513;\n\nfunction depthwiseConvLayer(x: tf.Tensor4D, params: MobileNetV1.DepthwiseConvParams, strides: [number, number]) {\n return tf.tidy(() => {\n let out = tf.depthwiseConv2d(x, params.filters, strides, 'same');\n out = tf.batchNorm(\n out,\n params.batch_norm_mean,\n params.batch_norm_variance,\n params.batch_norm_offset,\n params.batch_norm_scale,\n epsilon,\n );\n return tf.clipByValue(out, 0, 6);\n });\n}\n\nfunction getStridesForLayerIdx(layerIdx: number): [number, number] {\n return [2, 4, 6, 12].some((idx) => idx === layerIdx) ? 
[2, 2] : [1, 1];\n}\n\nexport function mobileNetV1(x: tf.Tensor4D, params: MobileNetV1.Params) {\n return tf.tidy(() => {\n let conv11 = null;\n let out = pointwiseConvLayer(x, params.conv_0, [2, 2]);\n\n const convPairParams = [\n params.conv_1,\n params.conv_2,\n params.conv_3,\n params.conv_4,\n params.conv_5,\n params.conv_6,\n params.conv_7,\n params.conv_8,\n params.conv_9,\n params.conv_10,\n params.conv_11,\n params.conv_12,\n params.conv_13,\n ];\n\n convPairParams.forEach((param, i) => {\n const layerIdx = i + 1;\n const depthwiseConvStrides = getStridesForLayerIdx(layerIdx);\n out = depthwiseConvLayer(out, param.depthwise_conv, depthwiseConvStrides);\n out = pointwiseConvLayer(out, param.pointwise_conv, [1, 1]);\n if (layerIdx === 11) conv11 = out;\n });\n\n if (conv11 === null) {\n throw new Error('mobileNetV1 - output of conv layer 11 is null');\n }\n\n return {\n out,\n conv11: conv11 as any,\n };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nfunction IOU(boxes: tf.Tensor2D, i: number, j: number) {\n const boxesData = boxes.arraySync();\n const yminI = Math.min(boxesData[i][0], boxesData[i][2]);\n const xminI = Math.min(boxesData[i][1], boxesData[i][3]);\n const ymaxI = Math.max(boxesData[i][0], boxesData[i][2]);\n const xmaxI = Math.max(boxesData[i][1], boxesData[i][3]);\n const yminJ = Math.min(boxesData[j][0], boxesData[j][2]);\n const xminJ = Math.min(boxesData[j][1], boxesData[j][3]);\n const ymaxJ = Math.max(boxesData[j][0], boxesData[j][2]);\n const xmaxJ = Math.max(boxesData[j][1], boxesData[j][3]);\n const areaI = (ymaxI - yminI) * (xmaxI - xminI);\n const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ);\n if (areaI <= 0 || areaJ <= 0) return 0.0;\n const intersectionYmin = Math.max(yminI, yminJ);\n const intersectionXmin = Math.max(xminI, xminJ);\n const intersectionYmax = Math.min(ymaxI, ymaxJ);\n const intersectionXmax = Math.min(xmaxI, xmaxJ);\n const intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0.0) * Math.max(intersectionXmax - intersectionXmin, 0.0);\n return intersectionArea / (areaI + areaJ - intersectionArea);\n}\n\nexport function nonMaxSuppression(\n boxes: tf.Tensor2D,\n scores: number[],\n maxOutputSize: number,\n iouThreshold: number,\n scoreThreshold: number,\n): number[] {\n const numBoxes = boxes.shape[0];\n const outputSize = Math.min(maxOutputSize, numBoxes);\n\n const candidates = scores\n .map((score, boxIndex) => ({ score, boxIndex }))\n .filter((c) => c.score > scoreThreshold)\n .sort((c1, c2) => c2.score - c1.score);\n\n const suppressFunc = (x: number) => (x <= iouThreshold ? 
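The `IOU` helper above works on `[ymin, xmin, ymax, xmax]` rows of a rank-2 tensor. The same arithmetic on plain numbers, with one hand-checked case:

```ts
// Plain-number IoU, same formula as the IOU() helper above.
function iou(a: [number, number, number, number], b: [number, number, number, number]): number {
  const areaA = (a[2] - a[0]) * (a[3] - a[1]);
  const areaB = (b[2] - b[0]) * (b[3] - b[1]);
  if (areaA <= 0 || areaB <= 0) return 0;
  const iy = Math.max(Math.min(a[2], b[2]) - Math.max(a[0], b[0]), 0);
  const ix = Math.max(Math.min(a[3], b[3]) - Math.max(a[1], b[1]), 0);
  const inter = iy * ix;
  return inter / (areaA + areaB - inter);
}

// Two unit boxes overlapping by half: intersection 0.5, union 1.5 => IoU = 1/3.
console.log(iou([0, 0, 1, 1], [0, 0.5, 1, 1.5])); // 0.3333...
```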
1 : 0);\n const selected: number[] = [];\n\n candidates.forEach((c) => {\n if (selected.length >= outputSize) return;\n const originalScore = c.score;\n for (let j = selected.length - 1; j >= 0; --j) {\n const iou = IOU(boxes, c.boxIndex, selected[j]);\n if (iou === 0.0) continue;\n c.score *= suppressFunc(iou);\n if (c.score <= scoreThreshold) break;\n }\n if (originalScore === c.score) {\n selected.push(c.boxIndex);\n }\n });\n return selected;\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { OutputLayerParams } from './types';\n\nfunction getCenterCoordinatesAndSizesLayer(x: tf.Tensor2D) {\n const vec = tf.unstack(tf.transpose(x, [1, 0]));\n\n const sizes = [\n tf.sub(vec[2], vec[0]),\n tf.sub(vec[3], vec[1]),\n ];\n const centers = [\n tf.add(vec[0], tf.div(sizes[0], 2)),\n tf.add(vec[1], tf.div(sizes[1], 2)),\n ];\n return { sizes, centers };\n}\n\nfunction decodeBoxesLayer(x0: tf.Tensor2D, x1: tf.Tensor2D) {\n const { sizes, centers } = getCenterCoordinatesAndSizesLayer(x0);\n\n const vec = tf.unstack(tf.transpose(x1, [1, 0]));\n const div0_out = tf.div(tf.mul(tf.exp(tf.div(vec[2], 5)), sizes[0]), 2);\n const add0_out = tf.add(tf.mul(tf.div(vec[0], 10), sizes[0]), centers[0]);\n const div1_out = tf.div(tf.mul(tf.exp(tf.div(vec[3], 5)), sizes[1]), 2);\n const add1_out = tf.add(tf.mul(tf.div(vec[1], 10), sizes[1]), centers[1]);\n\n return tf.transpose(\n tf.stack([\n tf.sub(add0_out, div0_out),\n tf.sub(add1_out, div1_out),\n tf.add(add0_out, div0_out),\n tf.add(add1_out, div1_out),\n ]),\n [1, 0],\n );\n}\n\nexport function outputLayer(boxPredictions: tf.Tensor4D, classPredictions: tf.Tensor4D, params: OutputLayerParams) {\n return tf.tidy(() => {\n const batchSize = boxPredictions.shape[0];\n\n let boxes = decodeBoxesLayer(\n tf.reshape(tf.tile(params.extra_dim, [batchSize, 1, 1]), [-1, 4]) as tf.Tensor2D,\n tf.reshape(boxPredictions, [-1, 4]) as tf.Tensor2D,\n );\n boxes = tf.reshape(boxes, [batchSize, (boxes.shape[0] / batchSize), 4]);\n\n const scoresAndClasses = tf.sigmoid(tf.slice(classPredictions, [0, 0, 1], [-1, -1, -1]));\n let scores = tf.slice(scoresAndClasses, [0, 0, 0], [-1, -1, 1]) as tf.Tensor;\n\n scores = tf.reshape(scores, [batchSize, scores.shape[1] as number]);\n\n const boxesByBatch = tf.unstack(boxes) as tf.Tensor2D[];\n const scoresByBatch = tf.unstack(scores) as tf.Tensor1D[];\n\n return { boxes: boxesByBatch, scores: scoresByBatch };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { convLayer } from '../common/index';\nimport { BoxPredictionParams } from './types';\n\nexport function boxPredictionLayer(\n x: tf.Tensor4D,\n params: BoxPredictionParams,\n) {\n return tf.tidy(() => {\n const batchSize = x.shape[0];\n const boxPredictionEncoding = tf.reshape(\n convLayer(x, params.box_encoding_predictor),\n [batchSize, -1, 1, 4],\n );\n const classPrediction = tf.reshape(\n convLayer(x, params.class_predictor),\n [batchSize, -1, 3],\n );\n return { boxPredictionEncoding, classPrediction };\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { boxPredictionLayer } from './boxPredictionLayer';\nimport { pointwiseConvLayer } from './pointwiseConvLayer';\nimport { PredictionLayerParams } from './types';\n\nexport function predictionLayer(\n x: tf.Tensor4D,\n conv11: tf.Tensor4D,\n params: PredictionLayerParams,\n) {\n return tf.tidy(() => {\n const conv0 = pointwiseConvLayer(x, params.conv_0, [1, 1]);\n const conv1 = pointwiseConvLayer(conv0, params.conv_1, [2, 2]);\n const conv2 = pointwiseConvLayer(conv1, 
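`decodeBoxesLayer` above regresses each prediction against an anchor: center offsets are divided by 10 and scaled by the anchor size, while extents come from `exp(pred / 5)` times the anchor size. The same arithmetic for a single box in plain numbers (illustrative only):

```ts
// Decode one prediction against one anchor box, following the
// div-by-10 / exp-div-by-5 scaling used in decodeBoxesLayer.
function decodeBox(anchor: [number, number, number, number], pred: [number, number, number, number]) {
  const [a0, a1, a2, a3] = anchor;
  const size0 = a2 - a0;
  const size1 = a3 - a1;
  const ctr0 = a0 + size0 / 2;
  const ctr1 = a1 + size1 / 2;
  const newCtr0 = (pred[0] / 10) * size0 + ctr0;
  const newCtr1 = (pred[1] / 10) * size1 + ctr1;
  const half0 = (Math.exp(pred[2] / 5) * size0) / 2;
  const half1 = (Math.exp(pred[3] / 5) * size1) / 2;
  return [newCtr0 - half0, newCtr1 - half1, newCtr0 + half0, newCtr1 + half1];
}

// A zero prediction reproduces the anchor itself (exp(0) = 1, offset 0).
console.log(decodeBox([0.2, 0.2, 0.4, 0.4], [0, 0, 0, 0])); // [0.2, 0.2, 0.4, 0.4]
```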
params.conv_2, [1, 1]);\n const conv3 = pointwiseConvLayer(conv2, params.conv_3, [2, 2]);\n const conv4 = pointwiseConvLayer(conv3, params.conv_4, [1, 1]);\n const conv5 = pointwiseConvLayer(conv4, params.conv_5, [2, 2]);\n const conv6 = pointwiseConvLayer(conv5, params.conv_6, [1, 1]);\n const conv7 = pointwiseConvLayer(conv6, params.conv_7, [2, 2]);\n\n const boxPrediction0 = boxPredictionLayer(conv11, params.box_predictor_0);\n const boxPrediction1 = boxPredictionLayer(x, params.box_predictor_1);\n const boxPrediction2 = boxPredictionLayer(conv1, params.box_predictor_2);\n const boxPrediction3 = boxPredictionLayer(conv3, params.box_predictor_3);\n const boxPrediction4 = boxPredictionLayer(conv5, params.box_predictor_4);\n const boxPrediction5 = boxPredictionLayer(conv7, params.box_predictor_5);\n\n const boxPredictions = tf.concat([\n boxPrediction0.boxPredictionEncoding,\n boxPrediction1.boxPredictionEncoding,\n boxPrediction2.boxPredictionEncoding,\n boxPrediction3.boxPredictionEncoding,\n boxPrediction4.boxPredictionEncoding,\n boxPrediction5.boxPredictionEncoding,\n ], 1) as tf.Tensor4D;\n\n const classPredictions = tf.concat([\n boxPrediction0.classPrediction,\n boxPrediction1.classPrediction,\n boxPrediction2.classPrediction,\n boxPrediction3.classPrediction,\n boxPrediction4.classPrediction,\n boxPrediction5.classPrediction,\n ], 1) as tf.Tensor4D;\n\n return {\n boxPredictions,\n classPredictions,\n };\n });\n}\n", "export interface ISsdMobilenetv1Options {\n minConfidence?: number\n maxResults?: number\n}\n\nexport class SsdMobilenetv1Options {\n protected _name: string = 'SsdMobilenetv1Options'\n\n private _minConfidence: number\n\n private _maxResults: number\n\n constructor({ minConfidence, maxResults }: ISsdMobilenetv1Options = {}) {\n this._minConfidence = minConfidence || 0.5;\n this._maxResults = maxResults || 100;\n\n if (typeof this._minConfidence !== 'number' || this._minConfidence <= 0 || this._minConfidence >= 1) {\n throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);\n }\n\n if (typeof this._maxResults !== 'number') {\n throw new Error(`${this._name} - expected maxResults to be a number`);\n }\n }\n\n get minConfidence(): number { return this._minConfidence; }\n\n get maxResults(): number { return this._maxResults; }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { Rect } from '../classes/index';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { NetInput, TNetInput, toNetInput } from '../dom/index';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { mobileNetV1 } from './mobileNetV1';\nimport { nonMaxSuppression } from './nonMaxSuppression';\nimport { outputLayer } from './outputLayer';\nimport { predictionLayer } from './predictionLayer';\nimport { ISsdMobilenetv1Options, SsdMobilenetv1Options } from './SsdMobilenetv1Options';\nimport { NetParams } from './types';\n\nexport class SsdMobilenetv1 extends NeuralNetwork {\n constructor() {\n super('SsdMobilenetv1');\n }\n\n public forwardInput(input: NetInput) {\n const { params } = this;\n if (!params) throw new Error('SsdMobilenetv1 - load model before inference');\n return tf.tidy(() => {\n const batchTensor = tf.cast(input.toBatchTensor(512, false), 'float32');\n const x = tf.sub(tf.div(batchTensor, 127.5), 1) as tf.Tensor4D; // input is normalized -1..1\n const features = mobileNetV1(x, params.mobilenetv1);\n 
const { boxPredictions, classPredictions } = predictionLayer(features.out, features.conv11, params.prediction_layer);\n return outputLayer(boxPredictions, classPredictions, params.output_layer);\n });\n }\n\n public async forward(input: TNetInput) {\n return this.forwardInput(await toNetInput(input));\n }\n\n public async locateFaces(input: TNetInput, options: ISsdMobilenetv1Options = {}): Promise<FaceDetection[]> {\n const { maxResults, minConfidence } = new SsdMobilenetv1Options(options);\n const netInput = await toNetInput(input);\n const { boxes: _boxes, scores: _scores } = this.forwardInput(netInput);\n const boxes = _boxes[0];\n const scores = _scores[0];\n for (let i = 1; i < _boxes.length; i++) {\n _boxes[i].dispose();\n _scores[i].dispose();\n }\n const scoresData = Array.from(scores.dataSync());\n const iouThreshold = 0.5;\n const indices = nonMaxSuppression(boxes, scoresData as number[], maxResults, iouThreshold, minConfidence);\n const reshapedDims = netInput.getReshapedInputDimensions(0);\n const inputSize = netInput.inputSize as number;\n const padX = inputSize / reshapedDims.width;\n const padY = inputSize / reshapedDims.height;\n const boxesData = boxes.arraySync();\n const results = indices\n .map((idx) => {\n const [top, bottom] = [\n Math.max(0, boxesData[idx][0]),\n Math.min(1.0, boxesData[idx][2]),\n ].map((val) => val * padY);\n const [left, right] = [\n Math.max(0, boxesData[idx][1]),\n Math.min(1.0, boxesData[idx][3]),\n ].map((val) => val * padX);\n return new FaceDetection(\n scoresData[idx] as number,\n new Rect(left, top, right - left, bottom - top),\n { height: netInput.getInputHeight(0), width: netInput.getInputWidth(0) },\n );\n });\n boxes.dispose();\n scores.dispose();\n return results;\n }\n\n protected getDefaultModelName(): string {\n return 'ssd_mobilenetv1_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap);\n }\n\n protected extractParams(weights: Float32Array) {\n return extractParams(weights);\n }\n}\n", "import { SsdMobilenetv1 } from './SsdMobilenetv1';\n\nexport * from './SsdMobilenetv1';\nexport * from './SsdMobilenetv1Options';\n\nexport function createSsdMobilenetv1(weights: Float32Array) {\n const net = new SsdMobilenetv1();\n net.extractWeights(weights);\n return net;\n}\n\nexport function createFaceDetectionNet(weights: Float32Array) {\n return createSsdMobilenetv1(weights);\n}\n\n// alias for backward compatibility\nexport class FaceDetectionNet extends SsdMobilenetv1 {}\n", "import { Point } from '../classes/index';\n\nexport const IOU_THRESHOLD = 0.4;\n\nexport const BOX_ANCHORS = [\n new Point(0.738768, 0.874946),\n new Point(2.42204, 2.65704),\n new Point(4.30971, 7.04493),\n new Point(10.246, 4.59428),\n new Point(12.6868, 11.8741),\n];\n\nexport const BOX_ANCHORS_SEPARABLE = [\n new Point(1.603231, 2.094468),\n new Point(6.041143, 7.080126),\n new Point(2.882459, 3.518061),\n new Point(4.266906, 5.178857),\n new Point(9.041765, 10.66308),\n];\n\nexport const MEAN_RGB_SEPARABLE: [number, number, number] = [117.001, 114.697, 97.404];\n\nexport const DEFAULT_MODEL_NAME = 'tiny_yolov2_model';\nexport const DEFAULT_MODEL_NAME_SEPARABLE_CONV = 'tiny_yolov2_separable_conv_model';\n", "import { Point } from '../classes/Point';\n\nexport type TinyYolov2Config = {\n withSeparableConvs: boolean\n iouThreshold: number\n anchors: Point[]\n classes: string[]\n meanRgb?: [number, number, number]\n withClassScores?: boolean,\n filterSizes?: number[]\n isFirstLayerConv2d?: 
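End to end, the SSD detector above is typically driven through the exported globals. A usage sketch (the `/model` URL is a placeholder for wherever the `ssd_mobilenetv1_model` weights are hosted):

```ts
import * as faceapi from '@vladmandic/face-api';

async function run(video: HTMLVideoElement) {
  // Load the ssd_mobilenetv1_model weights from a hosted model directory (placeholder URL).
  await faceapi.nets.ssdMobilenetv1.load('/model');
  // minConfidence must be in (0, 1); maxResults caps how many boxes NMS may return.
  const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5, maxResults: 10 });
  const detections = await faceapi.nets.ssdMobilenetv1.locateFaces(video, options);
  for (const det of detections) console.log(det.score, det.box);
}
```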
boolean\n}\n\nconst isNumber = (arg: any) => typeof arg === 'number';\n\nexport function validateConfig(config: any) {\n if (!config) {\n throw new Error(`invalid config: ${config}`);\n }\n\n if (typeof config.withSeparableConvs !== 'boolean') {\n throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config.withSeparableConvs}`);\n }\n\n if (!isNumber(config.iouThreshold) || config.iouThreshold < 0 || config.iouThreshold > 1.0) {\n throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config.iouThreshold}`);\n }\n\n if (\n !Array.isArray(config.classes)\n || !config.classes.length\n || !config.classes.every((c: any) => typeof c === 'string')\n ) {\n throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(config.classes)}`);\n }\n\n if (\n !Array.isArray(config.anchors)\n || !config.anchors.length\n || !config.anchors.map((a: any) => a || {}).every((a: any) => isNumber(a.x) && isNumber(a.y))\n ) {\n throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config.anchors)}`);\n }\n\n if (config.meanRgb && (\n !Array.isArray(config.meanRgb)\n || config.meanRgb.length !== 3\n || !config.meanRgb.every(isNumber)\n )) {\n throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config.meanRgb)}`);\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nexport function leaky(x: tf.Tensor4D): tf.Tensor4D {\n return tf.tidy(() => {\n const min = tf.mul(x, tf.scalar(0.10000000149011612));\n return tf.add(tf.relu(tf.sub(x, min)), min);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { leaky } from './leaky';\nimport { ConvWithBatchNorm } from './types';\n\nexport function convWithBatchNorm(x: tf.Tensor4D, params: ConvWithBatchNorm): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D;\n out = tf.conv2d(out, params.conv.filters, [1, 1], 'valid');\n out = tf.sub(out, params.bn.sub);\n out = tf.mul(out, params.bn.truediv);\n out = tf.add(out, params.conv.bias);\n return leaky(out);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { SeparableConvParams } from '../common/types';\nimport { leaky } from './leaky';\n\nexport function depthwiseSeparableConv(x: tf.Tensor4D, params: SeparableConvParams): tf.Tensor4D {\n return tf.tidy(() => {\n let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]) as tf.Tensor4D;\n out = tf.separableConv2d(out, params.depthwise_filter, params.pointwise_filter, [1, 1], 'valid');\n out = tf.add(out, params.bias);\n return leaky(out);\n });\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { extractConvParamsFactory } from '../common/index';\nimport { extractSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';\nimport { extractWeightsFactory } from '../common/extractWeightsFactory';\nimport { ExtractWeightsFunction, ParamMapping } from '../common/types';\nimport { TinyYolov2Config } from './config';\nimport { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';\n\nfunction extractorsFactory(extractWeights: ExtractWeightsFunction, paramMappings: ParamMapping[]) {\n const extractConvParams = extractConvParamsFactory(extractWeights, paramMappings);\n\n function extractBatchNormParams(size: number, mappedPrefix: string): BatchNorm {\n const sub = tf.tensor1d(extractWeights(size));\n const truediv = tf.tensor1d(extractWeights(size));\n\n paramMappings.push(\n 
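`validateConfig` above accepts any object whose fields satisfy the listed checks; note that anchors only need numeric `x`/`y` properties, so plain object literals pass. An example that clears every rule (values are illustrative, not trained ones; the import path assumes a file sitting next to `src/tinyYolov2/config.ts`):

```ts
import { validateConfig } from './config'; // path is an assumption for this sketch

const config = {
  withSeparableConvs: true, // must be a boolean
  iouThreshold: 0.4, // must be a number within [0, 1]
  classes: ['face'], // non-empty string[]
  anchors: [{ x: 1.6, y: 2.1 }, { x: 6.0, y: 7.1 }], // entries need numeric x and y
  meanRgb: [117.001, 114.697, 97.404], // optional, exactly three numbers
};

validateConfig(config); // throws on the first violated rule, otherwise passes silently
```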
{ paramPath: `${mappedPrefix}/sub` },\n { paramPath: `${mappedPrefix}/truediv` },\n );\n return { sub, truediv };\n }\n\n function extractConvWithBatchNormParams(channelsIn: number, channelsOut: number, mappedPrefix: string): ConvWithBatchNorm {\n const conv = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`);\n const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`);\n return { conv, bn };\n }\n const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings);\n\n return {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n };\n}\n\nexport function extractParams(\n weights: Float32Array,\n config: TinyYolov2Config,\n boxEncodingSize: number,\n filterSizes: number[],\n): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n const {\n extractWeights,\n getRemainingWeights,\n } = extractWeightsFactory(weights);\n\n const paramMappings: ParamMapping[] = [];\n const {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n } = extractorsFactory(extractWeights, paramMappings);\n let params: TinyYolov2NetParams;\n\n if (config.withSeparableConvs) {\n const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes;\n const conv0 = config.isFirstLayerConv2d\n ? extractConvParams(s0, s1, 3, 'conv0')\n : extractSeparableConvParams(s0, s1, 'conv0');\n const conv1 = extractSeparableConvParams(s1, s2, 'conv1');\n const conv2 = extractSeparableConvParams(s2, s3, 'conv2');\n const conv3 = extractSeparableConvParams(s3, s4, 'conv3');\n const conv4 = extractSeparableConvParams(s4, s5, 'conv4');\n const conv5 = extractSeparableConvParams(s5, s6, 'conv5');\n const conv6 = s7 ? extractSeparableConvParams(s6, s7, 'conv6') : undefined;\n const conv7 = s8 ? 
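The `leaky` activation a few files back computes `relu(x - 0.1x) + 0.1x`, which simplifies to `max(x, 0.1x)`, i.e. LeakyReLU with slope 0.1 (the constant 0.10000000149011612 is just 0.1 in float32). A scalar sanity check of that identity:

```ts
// leaky(x) = relu(x - 0.1x) + 0.1x = max(x, 0.1x) = LeakyReLU(x, alpha = 0.1)
const leakyScalar = (x: number) => Math.max(x - 0.1 * x, 0) + 0.1 * x;

console.log(leakyScalar(2));  //  2    (positive inputs pass through unchanged)
console.log(leakyScalar(-2)); // -0.2  (negative inputs are scaled by 0.1)
```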
extractSeparableConvParams(s7, s8, 'conv7') : undefined;\n const conv8 = extractConvParams(s8 || s7 || s6, 5 * boxEncodingSize, 1, 'conv8');\n params = {\n conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\n };\n } else {\n const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes;\n const conv0 = extractConvWithBatchNormParams(s0, s1, 'conv0');\n const conv1 = extractConvWithBatchNormParams(s1, s2, 'conv1');\n const conv2 = extractConvWithBatchNormParams(s2, s3, 'conv2');\n const conv3 = extractConvWithBatchNormParams(s3, s4, 'conv3');\n const conv4 = extractConvWithBatchNormParams(s4, s5, 'conv4');\n const conv5 = extractConvWithBatchNormParams(s5, s6, 'conv5');\n const conv6 = extractConvWithBatchNormParams(s6, s7, 'conv6');\n const conv7 = extractConvWithBatchNormParams(s7, s8, 'conv7');\n const conv8 = extractConvParams(s8, 5 * boxEncodingSize, 1, 'conv8');\n params = {\n conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\n };\n }\n if (getRemainingWeights().length !== 0) {\n throw new Error(`weights remaining after extract: ${getRemainingWeights().length}`);\n }\n return { params, paramMappings };\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { ConvParams } from '../common/index';\nimport { disposeUnusedWeightTensors } from '../common/disposeUnusedWeightTensors';\nimport { loadSeparableConvParamsFactory } from '../common/extractSeparableConvParamsFactory';\nimport { extractWeightEntryFactory } from '../common/extractWeightEntryFactory';\nimport { ParamMapping } from '../common/types';\nimport { TinyYolov2Config } from './config';\nimport { BatchNorm, ConvWithBatchNorm, TinyYolov2NetParams } from './types';\n\nfunction extractorsFactory(weightMap: any, paramMappings: ParamMapping[]) {\n const extractWeightEntry = extractWeightEntryFactory(weightMap, paramMappings);\n\n function extractBatchNormParams(prefix: string): BatchNorm {\n const sub = extractWeightEntry(`${prefix}/sub`, 1);\n const truediv = extractWeightEntry(`${prefix}/truediv`, 1);\n return { sub, truediv };\n }\n\n function extractConvParams(prefix: string): ConvParams {\n const filters = extractWeightEntry(`${prefix}/filters`, 4);\n const bias = extractWeightEntry(`${prefix}/bias`, 1);\n return { filters, bias };\n }\n\n function extractConvWithBatchNormParams(prefix: string): ConvWithBatchNorm {\n const conv = extractConvParams(`${prefix}/conv`);\n const bn = extractBatchNormParams(`${prefix}/bn`);\n return { conv, bn };\n }\n\n const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry);\n return {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n };\n}\n\nexport function extractParamsFromWeightMap(\n weightMap: tf.NamedTensorMap,\n config: TinyYolov2Config,\n): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n const paramMappings: ParamMapping[] = [];\n\n const {\n extractConvParams,\n extractConvWithBatchNormParams,\n extractSeparableConvParams,\n } = extractorsFactory(weightMap, paramMappings);\n\n let params: TinyYolov2NetParams;\n\n if (config.withSeparableConvs) {\n // eslint-disable-next-line no-mixed-operators\n const numFilters = (config.filterSizes && config.filterSizes.length || 9);\n params = {\n conv0: config.isFirstLayerConv2d ? 
extractConvParams('conv0') : extractSeparableConvParams('conv0'),\n conv1: extractSeparableConvParams('conv1'),\n conv2: extractSeparableConvParams('conv2'),\n conv3: extractSeparableConvParams('conv3'),\n conv4: extractSeparableConvParams('conv4'),\n conv5: extractSeparableConvParams('conv5'),\n conv6: numFilters > 7 ? extractSeparableConvParams('conv6') : undefined,\n conv7: numFilters > 8 ? extractSeparableConvParams('conv7') : undefined,\n conv8: extractConvParams('conv8'),\n };\n } else {\n params = {\n conv0: extractConvWithBatchNormParams('conv0'),\n conv1: extractConvWithBatchNormParams('conv1'),\n conv2: extractConvWithBatchNormParams('conv2'),\n conv3: extractConvWithBatchNormParams('conv3'),\n conv4: extractConvWithBatchNormParams('conv4'),\n conv5: extractConvWithBatchNormParams('conv5'),\n conv6: extractConvWithBatchNormParams('conv6'),\n conv7: extractConvWithBatchNormParams('conv7'),\n conv8: extractConvParams('conv8'),\n };\n }\n\n disposeUnusedWeightTensors(weightMap, paramMappings);\n return { params, paramMappings };\n}\n", "export interface ITinyYolov2Options {\n inputSize?: number\n scoreThreshold?: number\n}\n\nexport class TinyYolov2Options {\n protected _name: string = 'TinyYolov2Options'\n\n private _inputSize: number\n\n private _scoreThreshold: number\n\n constructor({ inputSize, scoreThreshold }: ITinyYolov2Options = {}) {\n this._inputSize = inputSize || 416;\n this._scoreThreshold = scoreThreshold || 0.5;\n\n if (typeof this._inputSize !== 'number' || this._inputSize % 32 !== 0) {\n throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);\n }\n\n if (typeof this._scoreThreshold !== 'number' || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) {\n throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`);\n }\n }\n\n get inputSize(): number { return this._inputSize; }\n\n get scoreThreshold(): number { return this._scoreThreshold; }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { BoundingBox } from '../classes/BoundingBox';\nimport { Dimensions } from '../classes/Dimensions';\nimport { ObjectDetection } from '../classes/ObjectDetection';\nimport { convLayer } from '../common/index';\nimport { ConvParams, SeparableConvParams } from '../common/types';\nimport { toNetInput } from '../dom/index';\nimport { NetInput } from '../dom/NetInput';\nimport { TNetInput } from '../dom/types';\nimport { NeuralNetwork } from '../NeuralNetwork';\nimport { sigmoid } from '../ops/index';\nimport { nonMaxSuppression } from '../ops/nonMaxSuppression';\nimport { normalize } from '../ops/normalize';\nimport { TinyYolov2Config, validateConfig } from './config';\nimport { convWithBatchNorm } from './convWithBatchNorm';\nimport { depthwiseSeparableConv } from './depthwiseSeparableConv';\nimport { extractParams } from './extractParams';\nimport { extractParamsFromWeightMap } from './extractParamsFromWeightMap';\nimport { leaky } from './leaky';\nimport { ITinyYolov2Options, TinyYolov2Options } from './TinyYolov2Options';\nimport { DefaultTinyYolov2NetParams, MobilenetParams, TinyYolov2NetParams } from './types';\n\nexport class TinyYolov2Base extends NeuralNetwork {\n public static DEFAULT_FILTER_SIZES = [3, 16, 32, 64, 128, 256, 512, 1024, 1024];\n\n private _config: TinyYolov2Config\n\n constructor(config: TinyYolov2Config) {\n super('TinyYolov2');\n validateConfig(config);\n this._config = config;\n }\n\n public get config(): TinyYolov2Config {\n return this._config;\n }\n\n public get withClassScores(): 
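`TinyYolov2Options` validates at construction time; `inputSize` must be divisible by 32, which matches the five stride-2 max-pools in `runTinyYolov2` below (an overall 2^5 = 32 downsampling). For example:

```ts
import { TinyYolov2Options } from '@vladmandic/face-api';

// Valid: 416 = 13 * 32, so the output grid is 13x13 cells.
const ok = new TinyYolov2Options({ inputSize: 416, scoreThreshold: 0.5 });
console.log(ok.inputSize, ok.scoreThreshold); // 416 0.5

// Throws: 400 is not divisible by 32.
// new TinyYolov2Options({ inputSize: 400 });
// => "TinyYolov2Options - expected inputSize to be a number divisible by 32"
```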
boolean {\n return this.config.withClassScores || this.config.classes.length > 1;\n }\n\n public get boxEncodingSize(): number {\n return 5 + (this.withClassScores ? this.config.classes.length : 0);\n }\n\n public runTinyYolov2(x: tf.Tensor4D, params: DefaultTinyYolov2NetParams): tf.Tensor4D {\n let out = convWithBatchNorm(x, params.conv0);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv1);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv2);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv3);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv4);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = convWithBatchNorm(out, params.conv5);\n out = tf.maxPool(out, [2, 2], [1, 1], 'same');\n out = convWithBatchNorm(out, params.conv6);\n out = convWithBatchNorm(out, params.conv7);\n return convLayer(out, params.conv8, 'valid', false);\n }\n\n public runMobilenet(x: tf.Tensor4D, params: MobilenetParams): tf.Tensor4D {\n let out = this.config.isFirstLayerConv2d\n ? leaky(convLayer(x, params.conv0 as ConvParams, 'valid', false))\n : depthwiseSeparableConv(x, params.conv0 as SeparableConvParams);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv1);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv2);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv3);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv4);\n out = tf.maxPool(out, [2, 2], [2, 2], 'same');\n out = depthwiseSeparableConv(out, params.conv5);\n out = tf.maxPool(out, [2, 2], [1, 1], 'same');\n out = params.conv6 ? depthwiseSeparableConv(out, params.conv6) : out;\n out = params.conv7 ? depthwiseSeparableConv(out, params.conv7) : out;\n return convLayer(out, params.conv8, 'valid', false);\n }\n\n public forwardInput(input: NetInput, inputSize: number): tf.Tensor4D {\n const { params } = this;\n\n if (!params) {\n throw new Error('TinyYolov2 - load model before inference');\n }\n\n return tf.tidy(() => {\n let batchTensor = tf.cast(input.toBatchTensor(inputSize, false), 'float32');\n batchTensor = this.config.meanRgb\n ? normalize(batchTensor, this.config.meanRgb)\n : batchTensor;\n batchTensor = batchTensor.div(255) as tf.Tensor4D;\n return this.config.withSeparableConvs\n ? 
this.runMobilenet(batchTensor, params as MobilenetParams)\n : this.runTinyYolov2(batchTensor, params as DefaultTinyYolov2NetParams);\n });\n }\n\n public async forward(input: TNetInput, inputSize: number): Promise {\n return this.forwardInput(await toNetInput(input), inputSize);\n }\n\n public async detect(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise {\n const { inputSize, scoreThreshold } = new TinyYolov2Options(forwardParams);\n const netInput = await toNetInput(input);\n const out = await this.forwardInput(netInput, inputSize);\n const out0 = tf.tidy(() => tf.unstack(out)[0].expandDims()) as tf.Tensor4D;\n const inputDimensions = {\n width: netInput.getInputWidth(0),\n height: netInput.getInputHeight(0),\n };\n\n const results = await this.extractBoxes(out0, netInput.getReshapedInputDimensions(0), scoreThreshold);\n out.dispose();\n out0.dispose();\n\n const boxes = results.map((res) => res.box);\n const scores = results.map((res) => res.score);\n const classScores = results.map((res) => res.classScore);\n const classNames = results.map((res) => this.config.classes[res.label]);\n\n const indices = nonMaxSuppression(\n boxes.map((box) => box.rescale(inputSize)),\n scores,\n this.config.iouThreshold,\n true,\n );\n\n const detections = indices.map((idx) => new ObjectDetection(\n scores[idx],\n classScores[idx],\n classNames[idx],\n boxes[idx],\n inputDimensions,\n ));\n return detections;\n }\n\n protected getDefaultModelName(): string {\n return '';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap) {\n return extractParamsFromWeightMap(weightMap, this.config);\n }\n\n protected extractParams(weights: Float32Array) {\n const filterSizes = this.config.filterSizes || TinyYolov2Base.DEFAULT_FILTER_SIZES;\n\n const numFilters = filterSizes ? filterSizes.length : undefined;\n if (numFilters !== 7 && numFilters !== 8 && numFilters !== 9) {\n throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${numFilters} filterSizes in config`);\n }\n return extractParams(weights, this.config, this.boxEncodingSize, filterSizes);\n }\n\n protected async extractBoxes(\n outputTensor: tf.Tensor4D,\n inputBlobDimensions: Dimensions,\n scoreThreshold?: number,\n ) {\n const { width, height } = inputBlobDimensions;\n const inputSize = Math.max(width, height);\n const correctionFactorX = inputSize / width;\n const correctionFactorY = inputSize / height;\n\n const numCells = outputTensor.shape[1];\n const numBoxes = this.config.anchors.length;\n\n const [boxesTensor, scoresTensor, classScoresTensor] = tf.tidy(() => {\n const reshaped = outputTensor.reshape([numCells, numCells, numBoxes, this.boxEncodingSize]);\n\n const boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, numBoxes, 4]);\n const scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, numBoxes, 1]);\n const classScores = this.withClassScores\n ? 
tf.softmax(reshaped.slice([0, 0, 0, 5], [numCells, numCells, numBoxes, this.config.classes.length]), 3)\n : tf.scalar(0);\n return [boxes, scores, classScores];\n });\n\n const results = [] as any;\n const scoresData = await scoresTensor.array();\n const boxesData = await boxesTensor.array();\n for (let row = 0; row < numCells; row++) {\n for (let col = 0; col < numCells; col++) {\n for (let anchor = 0; anchor < numBoxes; anchor++) {\n const score = sigmoid(scoresData[row][col][anchor][0]);\n if (!scoreThreshold || score > scoreThreshold) {\n const ctX = ((col + sigmoid(boxesData[row][col][anchor][0])) / numCells) * correctionFactorX;\n const ctY = ((row + sigmoid(boxesData[row][col][anchor][1])) / numCells) * correctionFactorY;\n const widthLocal = ((Math.exp(boxesData[row][col][anchor][2]) * this.config.anchors[anchor].x) / numCells) * correctionFactorX;\n const heightLocal = ((Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y) / numCells) * correctionFactorY;\n const x = (ctX - (widthLocal / 2));\n const y = (ctY - (heightLocal / 2));\n const pos = { row, col, anchor };\n const { classScore, label } = this.withClassScores\n ? await this.extractPredictedClass(classScoresTensor as tf.Tensor4D, pos)\n : { classScore: 1, label: 0 };\n results.push({\n box: new BoundingBox(x, y, x + widthLocal, y + heightLocal),\n score,\n classScore: score * classScore,\n label,\n ...pos,\n });\n }\n }\n }\n }\n\n boxesTensor.dispose();\n scoresTensor.dispose();\n classScoresTensor.dispose();\n return results;\n }\n\n private async extractPredictedClass(classesTensor: tf.Tensor4D, pos: { row: number, col: number, anchor: number }) {\n const { row, col, anchor } = pos;\n const classesData = await classesTensor.array();\n return Array(this.config.classes.length).fill(0)\n .map((_, i) => classesData[row][col][anchor][i])\n .map((classScore, label) => ({\n classScore,\n label,\n }))\n .reduce((max, curr) => (max.classScore > curr.classScore ? max : curr));\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection, Point } from '../classes/index';\nimport { ParamMapping } from '../common/types';\nimport { TNetInput } from '../dom/types';\nimport {\n BOX_ANCHORS,\n BOX_ANCHORS_SEPARABLE,\n DEFAULT_MODEL_NAME,\n DEFAULT_MODEL_NAME_SEPARABLE_CONV,\n IOU_THRESHOLD,\n MEAN_RGB_SEPARABLE,\n} from './const';\nimport { TinyYolov2Base } from './TinyYolov2Base';\nimport { ITinyYolov2Options } from './TinyYolov2Options';\nimport { TinyYolov2NetParams } from './types';\n\nexport class TinyYolov2 extends TinyYolov2Base {\n constructor(withSeparableConvs: boolean = true) {\n const config = {\n withSeparableConvs,\n iouThreshold: IOU_THRESHOLD,\n classes: ['face'],\n ...(withSeparableConvs\n ? {\n anchors: BOX_ANCHORS_SEPARABLE,\n meanRgb: MEAN_RGB_SEPARABLE,\n }\n : {\n anchors: BOX_ANCHORS,\n withClassScores: true,\n }),\n };\n\n super(config);\n }\n\n public get withSeparableConvs(): boolean {\n return this.config.withSeparableConvs;\n }\n\n public get anchors(): Point[] {\n return this.config.anchors;\n }\n\n public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise {\n const objectDetections = await this.detect(input, forwardParams);\n return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }));\n }\n\n protected getDefaultModelName(): string {\n return this.withSeparableConvs ? 
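`extractBoxes` above turns each grid-cell activation into relative image coordinates: a sigmoid places the box center inside its cell, and `exp` scales the matching anchor. The same computation for a single cell in plain numbers (illustrative values; correction factors are 1 for a square input):

```ts
const sigmoid = (x: number) => 1 / (1 + Math.exp(-x));

// One-anchor decode mirroring the formulas in extractBoxes.
const numCells = 13;
const anchor = { x: 1.603231, y: 2.094468 }; // first BOX_ANCHORS_SEPARABLE entry
const [tx, ty, tw, th] = [0, 0, 0, 0]; // raw network outputs for this cell/anchor
const row = 6;
const col = 6;

const ctX = (col + sigmoid(tx)) / numCells; // center x in [0..1], sigmoid(0) = 0.5
const ctY = (row + sigmoid(ty)) / numCells;
const width = (Math.exp(tw) * anchor.x) / numCells; // exp(0) = 1 -> plain anchor width
const height = (Math.exp(th) * anchor.y) / numCells;
const x = ctX - width / 2;
const y = ctY - height / 2;
console.log({ x, y, width, height }); // a box centered on the middle grid cell
```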
DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME;\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n return super.extractParamsFromWeightMap(weightMap);\n }\n}\n", "import { TinyYolov2 } from './TinyYolov2';\n\nexport * from './TinyYolov2Options';\nexport * from './config';\nexport * from './types';\nexport { TinyYolov2 };\n\nexport function createTinyYolov2(weights: Float32Array, withSeparableConvs: boolean = true) {\n const net = new TinyYolov2(withSeparableConvs);\n net.extractWeights(weights);\n return net;\n}\n", "import { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2/index';\n\nexport interface ITinyFaceDetectorOptions extends ITinyYolov2Options {}\n\nexport class TinyFaceDetectorOptions extends TinyYolov2Options {\n protected _name: string = 'TinyFaceDetectorOptions'\n}\n", "export class ComposableTask {\n // eslint-disable-next-line no-unused-vars\n public async then(onfulfilled: (value: T) => T | PromiseLike): Promise {\n return onfulfilled(await this.run());\n }\n\n public async run(): Promise {\n throw new Error('ComposableTask - run is not implemented');\n }\n}\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { extractFaces, extractFaceTensors, TNetInput } from '../dom/index';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { isWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\n\nexport async function extractAllFacesAndComputeResults, TResult>(\n parentResults: TSource[],\n input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n computeResults: (faces: Array) => Promise,\n extractedFaces?: Array | null,\n // eslint-disable-next-line no-unused-vars\n getRectForAlignment: (parentResult: WithFaceLandmarks) => FaceDetection = ({ alignedRect }) => alignedRect,\n) {\n const faceBoxes = parentResults.map((parentResult) => (isWithFaceLandmarks(parentResult)\n ? getRectForAlignment(parentResult)\n : parentResult.detection));\n const faces: Array = extractedFaces || (\n input instanceof tf.Tensor\n ? 
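`ComposableTask` (defined just above) implements the thenable protocol: awaiting a task invokes `then`, which runs `run()`. That is what lets the chained detection tasks later in this bundle be awaited directly. A stripped-down illustration with the generics restored and a hypothetical `Doubler` subclass:

```ts
class ComposableTask<T> {
  public async then(onfulfilled: (value: T) => T | PromiseLike<T>): Promise<T> {
    return onfulfilled(await this.run());
  }

  public async run(): Promise<T> {
    throw new Error('ComposableTask - run is not implemented');
  }
}

// Hypothetical subclass: awaiting it triggers run() through then().
class Doubler extends ComposableTask<number> {
  constructor(private value: number) { super(); }

  public async run(): Promise<number> { return this.value * 2; }
}

(async () => {
  console.log(await new Doubler(21)); // 42, with no explicit .run() call
})();
```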
await extractFaceTensors(input, faceBoxes)\n : await extractFaces(input, faceBoxes)\n );\n const results = await computeResults(faces);\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return results;\n}\n\nexport async function extractSingleFaceAndComputeResult, TResult>(\n parentResult: TSource,\n input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n computeResult: (face: HTMLCanvasElement | tf.Tensor3D) => Promise,\n extractedFaces?: Array | null,\n // eslint-disable-next-line no-unused-vars\n getRectForAlignment?: (parentResultLocal: WithFaceLandmarks) => FaceDetection,\n) {\n return extractAllFacesAndComputeResults(\n [parentResult],\n input,\n async (faces) => computeResult(faces[0]),\n extractedFaces,\n getRectForAlignment,\n );\n}\n", "import { Point } from '../classes/index';\n\nexport const IOU_THRESHOLD = 0.4;\n\nexport const BOX_ANCHORS = [\n new Point(1.603231, 2.094468),\n new Point(6.041143, 7.080126),\n new Point(2.882459, 3.518061),\n new Point(4.266906, 5.178857),\n new Point(9.041765, 10.66308),\n];\n\nexport const MEAN_RGB: [number, number, number] = [117.001, 114.697, 97.404];\n", "import * as tf from '../../dist/tfjs.esm';\n\nimport { FaceDetection, Point } from '../classes/index';\nimport { ParamMapping } from '../common/index';\nimport { TNetInput } from '../dom/index';\nimport { ITinyYolov2Options } from '../tinyYolov2/index';\nimport { TinyYolov2Base } from '../tinyYolov2/TinyYolov2Base';\nimport { TinyYolov2NetParams } from '../tinyYolov2/types';\nimport { BOX_ANCHORS, IOU_THRESHOLD, MEAN_RGB } from './const';\n\nexport class TinyFaceDetector extends TinyYolov2Base {\n constructor() {\n const config = {\n withSeparableConvs: true,\n iouThreshold: IOU_THRESHOLD,\n classes: ['face'],\n anchors: BOX_ANCHORS,\n meanRgb: MEAN_RGB,\n isFirstLayerConv2d: true,\n filterSizes: [3, 16, 32, 64, 128, 256, 512],\n };\n\n super(config);\n }\n\n public get anchors(): Point[] {\n return this.config.anchors;\n }\n\n public async locateFaces(input: TNetInput, forwardParams: ITinyYolov2Options): Promise {\n const objectDetections = await this.detect(input, forwardParams);\n return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight }));\n }\n\n protected getDefaultModelName(): string {\n return 'tiny_face_detector_model';\n }\n\n protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): { params: TinyYolov2NetParams, paramMappings: ParamMapping[] } {\n return super.extractParamsFromWeightMap(weightMap);\n }\n}\n", "import { AgeGenderNet } from '../ageGenderNet/AgeGenderNet';\nimport { AgeAndGenderPrediction } from '../ageGenderNet/types';\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { TNetInput } from '../dom/index';\nimport { FaceExpressionNet } from '../faceExpressionNet/FaceExpressionNet';\nimport { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\nimport { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';\nimport { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';\nimport { FaceRecognitionNet } from '../faceRecognitionNet/FaceRecognitionNet';\nimport { SsdMobilenetv1 } from '../ssdMobilenetv1/SsdMobilenetv1';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { TinyFaceDetector } from '../tinyFaceDetector/TinyFaceDetector';\nimport { TinyFaceDetectorOptions } from 
'../tinyFaceDetector/TinyFaceDetectorOptions';\nimport { ITinyYolov2Options, TinyYolov2 } from '../tinyYolov2/index';\n\nexport const nets = {\n ssdMobilenetv1: new SsdMobilenetv1(),\n tinyFaceDetector: new TinyFaceDetector(),\n tinyYolov2: new TinyYolov2(),\n faceLandmark68Net: new FaceLandmark68Net(),\n faceLandmark68TinyNet: new FaceLandmark68TinyNet(),\n faceRecognitionNet: new FaceRecognitionNet(),\n faceExpressionNet: new FaceExpressionNet(),\n ageGenderNet: new AgeGenderNet(),\n};\n\n/**\n * Attempts to detect all faces in an image using SSD Mobilenetv1 Network.\n *\n * @param input The input image.\n * @param options (optional, default: see SsdMobilenetv1Options constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const ssdMobilenetv1 = (input: TNetInput, options: SsdMobilenetv1Options): Promise => nets.ssdMobilenetv1.locateFaces(input, options);\n\n/**\n * Attempts to detect all faces in an image using the Tiny Face Detector.\n *\n * @param input The input image.\n * @param options (optional, default: see TinyFaceDetectorOptions constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const tinyFaceDetector = (input: TNetInput, options: TinyFaceDetectorOptions): Promise => nets.tinyFaceDetector.locateFaces(input, options);\n\n/**\n * Attempts to detect all faces in an image using the Tiny Yolov2 Network.\n *\n * @param input The input image.\n * @param options (optional, default: see TinyYolov2Options constructor for default parameters).\n * @returns Bounding box of each face with score.\n */\nexport const tinyYolov2 = (input: TNetInput, options: ITinyYolov2Options): Promise => nets.tinyYolov2.locateFaces(input, options);\n\n/**\n * Detects the 68 point face landmark positions of the face shown in an image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns 68 point face landmarks or array thereof in case of batch input.\n */\nexport const detectFaceLandmarks = (input: TNetInput): Promise => nets.faceLandmark68Net.detectLandmarks(input);\n\n/**\n * Detects the 68 point face landmark positions of the face shown in an image\n * using a tinier version of the 68 point face landmark model, which is slightly\n * faster at inference, but also slightly less accurate.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns 68 point face landmarks or array thereof in case of batch input.\n */\nexport const detectFaceLandmarksTiny = (input: TNetInput): Promise => nets.faceLandmark68TinyNet.detectLandmarks(input);\n\n/**\n * Computes a 128 entry vector (face descriptor / face embeddings) from the face shown in an image,\n * which uniquely represents the features of that persons face. The computed face descriptor can\n * be used to measure the similarity between faces, by computing the euclidean distance of two\n * face descriptors.\n *\n * @param inputs The face image extracted from the aligned bounding box of a face. 
Can\n * also be an array of input images, which will be batch processed.\n * @returns Face descriptor with 128 entries or array thereof in case of batch input.\n */\nexport const computeFaceDescriptor = (input: TNetInput): Promise => nets.faceRecognitionNet.computeFaceDescriptor(input);\n\n/**\n * Recognizes the facial expressions from a face image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns Facial expressions with corresponding probabilities or array thereof in case of batch input.\n */\nexport const recognizeFaceExpressions = (input: TNetInput): Promise => nets.faceExpressionNet.predictExpressions(input);\n\n/**\n * Predicts age and gender from a face image.\n *\n * @param inputs The face image extracted from the bounding box of a face. Can\n * also be an array of input images, which will be batch processed.\n * @returns Predictions with age, gender and gender probability or array thereof in case of batch input.\n */\nexport const predictAgeAndGender = (input: TNetInput): Promise => nets.ageGenderNet.predictAgeAndGender(input);\n\nexport const loadSsdMobilenetv1Model = (url: string) => nets.ssdMobilenetv1.load(url);\nexport const loadTinyFaceDetectorModel = (url: string) => nets.tinyFaceDetector.load(url);\nexport const loadTinyYolov2Model = (url: string) => nets.tinyYolov2.load(url);\nexport const loadFaceLandmarkModel = (url: string) => nets.faceLandmark68Net.load(url);\nexport const loadFaceLandmarkTinyModel = (url: string) => nets.faceLandmark68TinyNet.load(url);\nexport const loadFaceRecognitionModel = (url: string) => nets.faceRecognitionNet.load(url);\nexport const loadFaceExpressionModel = (url: string) => nets.faceExpressionNet.load(url);\nexport const loadAgeGenderModel = (url: string) => nets.ageGenderNet.load(url);\n\n// backward compatibility\nexport const loadFaceDetectionModel = loadSsdMobilenetv1Model;\nexport const locateFaces = ssdMobilenetv1;\nexport const detectLandmarks = detectFaceLandmarks;\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { TNetInput } from '../dom/index';\nimport { FaceExpressions } from '../faceExpressionNet/FaceExpressions';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { extendWithFaceExpressions, WithFaceExpressions } from '../factories/WithFaceExpressions';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderTask, PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\n\nexport class PredictFaceExpressionsTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected extractedFaces?: Array,\n ) {\n super();\n }\n}\n\nexport class PredictAllFaceExpressionsTask> extends PredictFaceExpressionsTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults 
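All of the exported helpers above delegate to the shared singletons in `nets`. A minimal browser-side sketch (the `/model` URL is a placeholder):

```ts
import * as faceapi from '@vladmandic/face-api';

async function analyze(img: HTMLImageElement) {
  // Each load* helper delegates to nets.<name>.load(url); '/model' is a placeholder path.
  await faceapi.loadTinyFaceDetectorModel('/model');
  const faces = await faceapi.tinyFaceDetector(img, new faceapi.TinyFaceDetectorOptions());
  console.log(`found ${faces.length} face(s)`, faces.map((f) => f.score));
}
```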
= await this.parentTask;\n\n const faceExpressionsByFace = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n async (faces) => Promise.all(\n faces.map((face) => nets.faceExpressionNet.predictExpressions(face) as Promise),\n ),\n this.extractedFaces,\n );\n\n return parentResults.map(\n (parentResult, i) => extendWithFaceExpressions(parentResult, faceExpressionsByFace[i]),\n );\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderTask(this, this.input);\n }\n}\n\nexport class PredictSingleFaceExpressionsTask> extends PredictFaceExpressionsTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n\n const faceExpressions = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.faceExpressionNet.predictExpressions(face) as Promise,\n this.extractedFaces,\n );\n\n return extendWithFaceExpressions(parentResult, faceExpressions);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderTask(this, this.input);\n }\n}\n\nexport class PredictAllFaceExpressionsWithFaceAlignmentTask>> extends PredictAllFaceExpressionsTask {\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class PredictSingleFaceExpressionsWithFaceAlignmentTask>> extends PredictSingleFaceExpressionsTask {\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { AgeAndGenderPrediction } from '../ageGenderNet/types';\nimport { TNetInput } from '../dom/index';\nimport { extendWithAge, WithAge } from '../factories/WithAge';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { extendWithGender, WithGender } from '../factories/WithGender';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllFaceExpressionsTask, PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class PredictAgeAndGenderTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected extractedFaces?: Array,\n ) {\n super();\n }\n}\n\nexport class PredictAllAgeAndGenderTask> extends PredictAgeAndGenderTaskBase>[], TSource[]> {\n public async run(): Promise>[]> {\n const parentResults = await this.parentTask;\n const ageAndGenderByFace = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n async (faces) => Promise.all(faces.map((face) => nets.ageGenderNet.predictAgeAndGender(face) as Promise)),\n this.extractedFaces,\n );\n return 
parentResults.map((parentResult, i) => {\n const { age, gender, genderProbability } = ageAndGenderByFace[i];\n return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age);\n });\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsTask(this, this.input);\n }\n}\n\nexport class PredictSingleAgeAndGenderTask> extends PredictAgeAndGenderTaskBase> | undefined, TSource | undefined> {\n public async run(): Promise> | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) return undefined;\n const { age, gender, genderProbability } = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.ageGenderNet.predictAgeAndGender(face) as Promise,\n this.extractedFaces,\n );\n return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsTask(this, this.input);\n }\n}\n\nexport class PredictAllAgeAndGenderWithFaceAlignmentTask>> extends PredictAllAgeAndGenderTask {\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class PredictSingleAgeAndGenderWithFaceAlignmentTask>> extends PredictSingleAgeAndGenderTask {\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { TNetInput } from '../dom/index';\nimport { extendWithFaceDescriptor, WithFaceDescriptor } from '../factories/WithFaceDescriptor';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { extractAllFacesAndComputeResults, extractSingleFaceAndComputeResult } from './extractFacesAndComputeResults';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class ComputeFaceDescriptorsTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n ) {\n super();\n }\n}\n\nexport class ComputeAllFaceDescriptorsTask>> extends ComputeFaceDescriptorsTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults = await this.parentTask;\n const descriptors = await extractAllFacesAndComputeResults(\n parentResults,\n this.input,\n (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise)),\n null,\n (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true }),\n );\n return descriptors.map((descriptor, i) => extendWithFaceDescriptor(parentResults[i], descriptor));\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n}\n\nexport class ComputeSingleFaceDescriptorTask>> extends 
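The age/gender task layers its results onto each parent object through `extendWithGender` and `extendWithAge`. Those factories are not shown in this bundle; presumably they are typed spreads along these lines (a sketch, with `gender` simplified to a string):

```ts
// Hypothetical re-creation of the WithAge/WithGender factory pattern.
type WithAge<T> = T & { age: number };
type WithGender<T> = T & { gender: string, genderProbability: number };

function extendWithAge<T>(obj: T, age: number): WithAge<T> {
  return { ...obj, age };
}

function extendWithGender<T>(obj: T, gender: string, genderProbability: number): WithGender<T> {
  return { ...obj, gender, genderProbability };
}

// Mirrors the nested call in PredictAllAgeAndGenderTask.run():
const result = extendWithAge(extendWithGender({ detection: 'stub' }, 'female', 0.97), 31);
// => { detection: 'stub', gender: 'female', genderProbability: 0.97, age: 31 }
```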
ComputeFaceDescriptorsTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n const descriptor = await extractSingleFaceAndComputeResult(\n parentResult,\n this.input,\n (face) => nets.faceRecognitionNet.computeFaceDescriptor(face) as Promise,\n null,\n // eslint-disable-next-line no-shadow\n (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true }),\n );\n\n return extendWithFaceDescriptor(parentResult, descriptor);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport * as tf from '../../dist/tfjs.esm';\n\nimport { FaceLandmarks68 } from '../classes/FaceLandmarks68';\nimport { extractFaces, extractFaceTensors, TNetInput } from '../dom/index';\nimport { FaceLandmark68Net } from '../faceLandmarkNet/FaceLandmark68Net';\nimport { FaceLandmark68TinyNet } from '../faceLandmarkNet/FaceLandmark68TinyNet';\nimport { WithFaceDetection } from '../factories/WithFaceDetection';\nimport { extendWithFaceLandmarks, WithFaceLandmarks } from '../factories/WithFaceLandmarks';\nimport { ComposableTask } from './ComposableTask';\nimport { ComputeAllFaceDescriptorsTask, ComputeSingleFaceDescriptorTask } from './ComputeFaceDescriptorsTasks';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderWithFaceAlignmentTask, PredictSingleAgeAndGenderWithFaceAlignmentTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsWithFaceAlignmentTask, PredictSingleFaceExpressionsWithFaceAlignmentTask } from './PredictFaceExpressionsTask';\n\nexport class DetectFaceLandmarksTaskBase extends ComposableTask {\n constructor(\n // eslint-disable-next-line no-unused-vars\n protected parentTask: ComposableTask | Promise,\n // eslint-disable-next-line no-unused-vars\n protected input: TNetInput,\n // eslint-disable-next-line no-unused-vars\n protected useTinyLandmarkNet: boolean,\n ) {\n super();\n }\n\n protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet {\n return this.useTinyLandmarkNet\n ? nets.faceLandmark68TinyNet\n : nets.faceLandmark68Net;\n }\n}\n\nexport class DetectAllFaceLandmarksTask> extends DetectFaceLandmarksTaskBase[], TSource[]> {\n public async run(): Promise[]> {\n const parentResults = await this.parentTask;\n const detections = parentResults.map((res) => res.detection);\n const faces: Array = this.input instanceof tf.Tensor\n ? 
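The descriptors computed by these tasks are 128-entry embeddings meant to be compared by euclidean distance, as the `computeFaceDescriptor` doc comment above notes. A minimal matcher sketch; the 0.6 threshold is a commonly used convention, not a value pinned down in this file:

```ts
// Euclidean distance between two 128-entry face descriptors.
function euclideanDistance(a: Float32Array, b: Float32Array): number {
  let sum = 0;
  for (let i = 0; i < a.length; i++) {
    const d = a[i] - b[i];
    sum += d * d;
  }
  return Math.sqrt(sum);
}

// Treat the faces as the same person if the embeddings are close enough
// (the threshold is a convention, tune it for your use case).
const isSamePerson = (a: Float32Array, b: Float32Array, threshold = 0.6) =>
  euclideanDistance(a, b) < threshold;
```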
await extractFaceTensors(this.input, detections)\n : await extractFaces(this.input, detections);\n const faceLandmarksByFace = await Promise.all(\n faces.map((face) => this.landmarkNet.detectLandmarks(face)),\n ) as FaceLandmarks68[];\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return parentResults.map((parentResult, i) => extendWithFaceLandmarks(parentResult, faceLandmarksByFace[i]));\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptors() {\n return new ComputeAllFaceDescriptorsTask(this, this.input);\n }\n}\n\nexport class DetectSingleFaceLandmarksTask> extends DetectFaceLandmarksTaskBase | undefined, TSource | undefined> {\n public async run(): Promise | undefined> {\n const parentResult = await this.parentTask;\n if (!parentResult) {\n return undefined;\n }\n const { detection } = parentResult;\n const faces: Array = this.input instanceof tf.Tensor\n ? await extractFaceTensors(this.input, [detection])\n : await extractFaces(this.input, [detection]);\n const landmarks = await this.landmarkNet.detectLandmarks(faces[0]) as FaceLandmarks68;\n faces.forEach((f) => f instanceof tf.Tensor && f.dispose());\n return extendWithFaceLandmarks(parentResult, landmarks);\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input);\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input);\n }\n\n withFaceDescriptor() {\n return new ComputeSingleFaceDescriptorTask(this, this.input);\n }\n}\n", "/* eslint-disable max-classes-per-file */\nimport { FaceDetection } from '../classes/FaceDetection';\nimport { TNetInput } from '../dom/index';\nimport { extendWithFaceDetection, WithFaceDetection } from '../factories/WithFaceDetection';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { TinyFaceDetectorOptions } from '../tinyFaceDetector/TinyFaceDetectorOptions';\nimport { TinyYolov2Options } from '../tinyYolov2/index';\nimport { ComposableTask } from './ComposableTask';\nimport { DetectAllFaceLandmarksTask, DetectSingleFaceLandmarksTask } from './DetectFaceLandmarksTasks';\nimport { nets } from './nets';\nimport { PredictAllAgeAndGenderTask, PredictSingleAgeAndGenderTask } from './PredictAgeAndGenderTask';\nimport { PredictAllFaceExpressionsTask, PredictSingleFaceExpressionsTask } from './PredictFaceExpressionsTask';\nimport { FaceDetectionOptions } from './types';\n\nexport class DetectFacesTaskBase extends ComposableTask {\n // eslint-disable-next-line no-unused-vars\n constructor(protected input: TNetInput, protected options: FaceDetectionOptions = new SsdMobilenetv1Options()) {\n super();\n }\n}\n\nexport class DetectAllFacesTask extends DetectFacesTaskBase {\n public async run(): Promise {\n const { input, options } = this;\n let result;\n if (options instanceof TinyFaceDetectorOptions) result = nets.tinyFaceDetector.locateFaces(input, options);\n else if (options instanceof SsdMobilenetv1Options) result = nets.ssdMobilenetv1.locateFaces(input, options);\n else if (options instanceof TinyYolov2Options) result = nets.tinyYolov2.locateFaces(input, options);\n else throw new Error('detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options');\n return result;\n }\n\n private 
runAndExtendWithFaceDetections(): Promise[]> {\n return new Promise[]>((resolve, reject) => {\n this.run()\n .then((detections) => resolve(detections.map((detection) => extendWithFaceDetection({}, detection))))\n .catch((err) => reject(err));\n });\n }\n\n withFaceLandmarks(useTinyLandmarkNet: boolean = false) {\n return new DetectAllFaceLandmarksTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n useTinyLandmarkNet,\n );\n }\n\n withFaceExpressions() {\n return new PredictAllFaceExpressionsTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n );\n }\n\n withAgeAndGender() {\n return new PredictAllAgeAndGenderTask(\n this.runAndExtendWithFaceDetections(),\n this.input,\n );\n }\n}\n\nexport class DetectSingleFaceTask extends DetectFacesTaskBase {\n public async run(): Promise {\n const faceDetections = await new DetectAllFacesTask(this.input, this.options);\n let faceDetectionWithHighestScore = faceDetections[0];\n faceDetections.forEach((faceDetection) => {\n if (faceDetection.score > faceDetectionWithHighestScore.score) faceDetectionWithHighestScore = faceDetection;\n });\n return faceDetectionWithHighestScore;\n }\n\n private runAndExtendWithFaceDetection(): Promise | undefined> {\n // eslint-disable-next-line no-async-promise-executor\n return new Promise | undefined>(async (resolve) => {\n const detection = await this.run();\n resolve(detection ? extendWithFaceDetection<{}>({}, detection) : undefined);\n });\n }\n\n withFaceLandmarks(useTinyLandmarkNet: boolean = false) {\n return new DetectSingleFaceLandmarksTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n useTinyLandmarkNet,\n );\n }\n\n withFaceExpressions() {\n return new PredictSingleFaceExpressionsTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n );\n }\n\n withAgeAndGender() {\n return new PredictSingleAgeAndGenderTask(\n this.runAndExtendWithFaceDetection(),\n this.input,\n );\n }\n}\n", "import { TNetInput } from '../dom/index';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/SsdMobilenetv1Options';\nimport { DetectAllFacesTask, DetectSingleFaceTask } from './DetectFacesTasks';\nimport { FaceDetectionOptions } from './types';\n\nexport function detectSingleFace(input: TNetInput, options: FaceDetectionOptions = new SsdMobilenetv1Options()): DetectSingleFaceTask {\n return new DetectSingleFaceTask(input, options);\n}\n\nexport function detectAllFaces(input: TNetInput, options: FaceDetectionOptions = new SsdMobilenetv1Options()): DetectAllFacesTask {\n return new DetectAllFacesTask(input, options);\n}\n", "import { TNetInput } from '../dom/index';\nimport { WithFaceDescriptor, WithFaceDetection, WithFaceLandmarks } from '../factories/index';\nimport { SsdMobilenetv1Options } from '../ssdMobilenetv1/index';\nimport { ITinyYolov2Options, TinyYolov2Options } from '../tinyYolov2/index';\nimport { detectAllFaces } from './detectFaces';\n\nexport async function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise>>[]> {\n return detectAllFaces(input, new SsdMobilenetv1Options(minConfidence ? 
{ minConfidence } : {}))\n .withFaceLandmarks()\n .withFaceDescriptors();\n}\n\nexport async function allFacesTinyYolov2(input: TNetInput, forwardParams: ITinyYolov2Options = {}): Promise>>[]> {\n return detectAllFaces(input, new TinyYolov2Options(forwardParams))\n .withFaceLandmarks()\n .withFaceDescriptors();\n}\n\nexport const allFaces = allFacesSsdMobilenetv1;\n", "export function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array) {\n if (arr1.length !== arr2.length) throw new Error('euclideanDistance: arr1.length !== arr2.length');\n\n const desc1 = Array.from(arr1);\n const desc2 = Array.from(arr2);\n\n return Math.sqrt(\n desc1\n .map((val, i) => val - desc2[i])\n .reduce((res, diff) => res + (diff ** 2), 0),\n );\n}\n", "import { FaceMatch } from '../classes/FaceMatch';\nimport { LabeledFaceDescriptors } from '../classes/LabeledFaceDescriptors';\nimport { euclideanDistance } from '../euclideanDistance';\nimport { WithFaceDescriptor } from '../factories/index';\n\nexport class FaceMatcher {\n private _labeledDescriptors: LabeledFaceDescriptors[]\n\n private _distanceThreshold: number\n\n constructor(\n inputs: LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>,\n distanceThreshold: number = 0.6,\n ) {\n this._distanceThreshold = distanceThreshold;\n\n const inputArray = Array.isArray(inputs) ? inputs : [inputs];\n\n if (!inputArray.length) {\n throw new Error('FaceRecognizer.constructor - expected atleast one input');\n }\n\n let count = 1;\n const createUniqueLabel = () => `person ${count++}`;\n\n this._labeledDescriptors = inputArray.map((desc) => {\n if (desc instanceof LabeledFaceDescriptors) {\n return desc;\n }\n\n if (desc instanceof Float32Array) {\n return new LabeledFaceDescriptors(createUniqueLabel(), [desc]);\n }\n\n if (desc.descriptor && desc.descriptor instanceof Float32Array) {\n return new LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor]);\n }\n\n throw new Error('FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>');\n });\n }\n\n public get labeledDescriptors(): LabeledFaceDescriptors[] { return this._labeledDescriptors; }\n\n public get distanceThreshold(): number { return this._distanceThreshold; }\n\n public computeMeanDistance(queryDescriptor: Float32Array, descriptors: Float32Array[]): number {\n return descriptors\n .map((d) => euclideanDistance(d, queryDescriptor))\n .reduce((d1, d2) => d1 + d2, 0)\n / (descriptors.length || 1);\n }\n\n public matchDescriptor(queryDescriptor: Float32Array): FaceMatch {\n return this.labeledDescriptors\n .map(({ descriptors, label }) => new FaceMatch(\n label,\n this.computeMeanDistance(queryDescriptor, descriptors),\n ))\n .reduce((best, curr) => (best.distance < curr.distance ? best : curr));\n }\n\n public findBestMatch(queryDescriptor: Float32Array): FaceMatch {\n const bestMatch = this.matchDescriptor(queryDescriptor);\n return bestMatch.distance < this.distanceThreshold\n ? 
bestMatch\n : new FaceMatch('unknown', bestMatch.distance);\n }\n\n public toJSON(): any {\n return {\n distanceThreshold: this.distanceThreshold,\n labeledDescriptors: this.labeledDescriptors.map((ld) => ld.toJSON()),\n };\n }\n\n public static fromJSON(json: any): FaceMatcher {\n const labeledDescriptors = json.labeledDescriptors\n .map((ld: any) => LabeledFaceDescriptors.fromJSON(ld));\n return new FaceMatcher(labeledDescriptors, json.distanceThreshold);\n }\n}\n", "import { TinyFaceDetector } from './TinyFaceDetector';\n\nexport * from './TinyFaceDetector';\nexport * from './TinyFaceDetectorOptions';\n\nexport function createTinyFaceDetector(weights: Float32Array) {\n const net = new TinyFaceDetector();\n net.extractWeights(weights);\n return net;\n}\n", "import { Dimensions, IDimensions } from './classes/index';\nimport { FaceDetection } from './classes/FaceDetection';\nimport { FaceLandmarks } from './classes/FaceLandmarks';\nimport { extendWithFaceDetection, isWithFaceDetection } from './factories/WithFaceDetection';\nimport { extendWithFaceLandmarks, isWithFaceLandmarks } from './factories/WithFaceLandmarks';\n\nexport function resizeResults(results: T, dimensions: IDimensions): T {\n const { width, height } = new Dimensions(dimensions.width, dimensions.height);\n\n if (width <= 0 || height <= 0) {\n throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`);\n }\n\n if (Array.isArray(results)) {\n // return results.map(obj => resizeResults(obj, { width, height })) as any as T\n return (results as Array).map((obj) => resizeResults(obj, { width, height } as IDimensions)) as any as T;\n }\n\n if (isWithFaceLandmarks(results)) {\n const resizedDetection = results.detection.forSize(width, height);\n const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height);\n return extendWithFaceLandmarks(extendWithFaceDetection(results, resizedDetection), resizedLandmarks);\n }\n\n if (isWithFaceDetection(results)) {\n return extendWithFaceDetection(results, results.detection.forSize(width, height));\n }\n\n if (results instanceof FaceLandmarks || results instanceof FaceDetection) {\n return (results as any).forSize(width, height);\n }\n\n return results;\n}\n", "import * as tf from '../dist/tfjs.esm';\nimport * as draw from './draw/index';\nimport * as utils from './utils/index';\nimport * as pkg from '../package.json';\n\nexport { tf, draw, utils };\n\nexport * from './ageGenderNet/index';\nexport * from './classes/index';\nexport * from './dom/index';\nexport * from './env/index';\nexport * from './faceExpressionNet/index';\nexport * from './faceLandmarkNet/index';\nexport * from './faceRecognitionNet/index';\nexport * from './factories/index';\nexport * from './globalApi/index';\nexport * from './ops/index';\nexport * from './ssdMobilenetv1/index';\nexport * from './tinyFaceDetector/index';\nexport * from './tinyYolov2/index';\nexport * from './euclideanDistance';\nexport * from './NeuralNetwork';\nexport * from './resizeResults';\n\nconst node = (typeof process !== 'undefined');\nconst browser = (typeof navigator !== 'undefined') && (typeof navigator.userAgent !== 'undefined');\nexport const version = { faceapi: pkg.version as string, node, browser };\n"], + "mappings": 
";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA;AACA;AADA;AACA;;;ACNA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACEO,qBACL,KACA,QACA,WAAoB,OACpB;AACA,MAAI;AAEJ,SAAO,MAAM,GAAG,QAAQ,CAAC,EAAE,GAAG,KAAK,YAAY;AAC7C,UAAM,OAAO,OAAO;AACpB,QAAI,OAAO,KAAK,GAAG,KAAK;AACxB,QAAI,OAAO,GAAG;AAAA;AAGhB,MAAI,UAAU;AACZ,UAAM,OAAO,OAAO,OAAO,SAAS;AACpC,UAAM,KAAK,OAAO;AAClB,QAAI,CAAC,QAAQ,CAAC,IAAI;AAChB;AAAA;AAGF,QAAI,OAAO,KAAK,GAAG,KAAK;AACxB,QAAI,OAAO,GAAG,GAAG,GAAG;AAAA;AAGtB,MAAI;AAAA;;;AC1BN;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACOO,uBAAwC;AAAA,EAK7C,YAAY,OAAe,QAAgB;AACzC,QAAI,CAAC,cAAc,UAAU,CAAC,cAAc,SAAS;AACnD,YAAM,IAAI,MAAM,wFAAwF,KAAK,UAAU,EAAE,OAAO;AAAA;AAGlI,SAAK,SAAS;AACd,SAAK,UAAU;AAAA;AAAA,MAGN,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,SAAiB;AAAE,WAAO,KAAK;AAAA;AAAA,EAEnC,UAAsB;AAC3B,WAAO,IAAI,WAAW,IAAI,KAAK,OAAO,IAAI,KAAK;AAAA;AAAA;;;ADrB5C,kBAAkB,SAAa,KAAa;AACjD,SAAO,mBAAqB,2BAAU,QAAO,MAAM,WAAW;AAAA;AAGzD,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,oBAAoB,SAAoC;AAC7D,SAAO,SAAS,SAAQ;AAAA;AAGnB,iBAAiB,KAAa;AACnC,SAAO,MAAM,MAAM;AAAA;AAGd,gBAAgB,KAAa;AAClC,SAAO,MAAM,MAAM;AAAA;AAGd,eAAe,KAAa,OAAe,GAAG;AACnD,QAAM,IAAI,MAAM;AAChB,SAAO,KAAK,MAAM,MAAM,KAAK;AAAA;AAGxB,sBAAsB,KAAmB;AAC9C,SAAO,OAAO,IAAI,SAAS,IAAI;AAAA;AAG1B,mCAAmC,EAAE,OAAO,UAAuB,WAAmB;AAC3F,QAAM,SAAQ,YAAY,KAAK,IAAI,QAAQ;AAC3C,SAAO,IAAI,WAAW,KAAK,MAAM,QAAQ,SAAQ,KAAK,MAAM,SAAS;AAAA;AAGhE,wBAAwB,KAAqB;AAClD,SAAO,IAAI,OAAO,CAAC,KAAK,OAAO,IAAI,IAAI,KAAK,IAAI,MAAM,GAAG,IACtD,IAAI,IAAI,MAAM,IAAI,QAAQ,IAAI;AAAA;AAG5B,eAAe,KAAa,OAAe,MAAwB;AACxE,SAAO,MAAM,KAAK,KAAK,GAAG,IAAI,CAAC,GAAG,MAAM,QAAS,IAAI;AAAA;AAGhD,uBAAuB,KAAU;AACtC,SAAO,CAAC,CAAC,OAAQ,QAAQ,YAAc,QAAQ,aAAc,CAAC,OAAO,MAAM,QAAQ,QAAQ;AAAA;AAGtF,4BAA4B,KAAU;AAC3C,SAAO,cAAc,QAAQ,OAAO,KAAK,OAAO;AAAA;;;AExD3C,kBAA8B;AAAA,EAKnC,YAAY,GAAW,GAAW;AAChC,SAAK,KAAK;AACV,SAAK,KAAK;AAAA;AAAA,MAGR,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,MAE1B,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,EAEvB,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,IAAI,IAAmB;AAC5B,WAAO,IAAI,MAAM,KAAK,IAAI,GAAG,GAAG,KAAK,IAAI,GAAG;AAAA;AAAA,EAGvC,MAAa;AAClB,WAAO,IAAI,MAAM,KAAK,IAAI,KAAK,IAAI,KAAK,IAAI,KAAK;AAAA;AAAA,EAG5C,YAAoB;AACzB,WAAO,KAAK,KAAM,KAAK,KAAK,IAAM,KAAK,KAAK;AAAA;AAAA,EAGvC,QAAe;AACpB,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK,IAAI,KAAK,MAAM,KAAK;AAAA;AAAA;;;ACtClD,gBAAwD;AAAA,SAC/C,OAAO,MAAoB;AACvC,WAAO,CAAC,CAAC,QAAQ,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAAQ,MAAM;AAAA;AAAA,SAGrD,iBAAiB,KAAU,QAAgB,0BAAmC,OAAO;AACjG,QAAI,CAAC,IAAI,OAAO,MAAM;AACpB,YAAM,IAAI,MAAM,GAAG,yBAAyB,KAAK,UAAU;AAAA;AAG7D,QAAI,CAAC,2BAA4B,KAAI,QAAQ,KAAK,IAAI,SAAS,IAAI;AACjE,YAAM,IAAI,MAAM,GAAG,mBAAmB,IAAI,sBAAsB,IAAI;AAAA;AAAA;AAAA,EAYxE,YAAY,MAA4B,0BAAmC,MAAM;AAC/E,UAAM,MAAO,QAAQ;AAErB,UAAM,SAAS,CAAC,IAAI,MAAM,IAAI,KAAK,IAAI,OAAO,IAAI,QAAQ,MAAM;AAChE,UAAM,SAAS,CAAC,IAAI,GAAG,IAAI,GAAG,IAAI,OAAO,IAAI,QAAQ,MAAM;AAE3D,QAAI,CAAC,UAAU,CAAC,QAAQ;AACtB,YAAM,IAAI,MAAM,2EAA2E,KAAK,UAAU;AAAA;AAG5G,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,SAC1B,CAAC,IAAI,GAAG,IAAI,GAAG,IAAI,OAAO,IAAI,UAC9B,CAAC,IAAI,MAAM,IAAI,KAAK,IAAI,QAAQ,IAAI,MAAM,IAAI,SAAS,IAAI;AAE/D,QAAI,iBAAiB;AAAA,MACnB;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,OACZ,mBAAmB;AAEtB,SAAK,KAAK;AACV,SAAK,KAAK;AACV,SAAK,SAAS;AACd,SAAK,UAAU;AAAA;AAAA,MAGN,IAAY;AAAE,WAAO,KAAK;AAAA;AAAA,MAE1B,IAAY;AAAE,WAAO,KAAK;AAAA;AA
AA,MAE1B,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,SAAiB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE/B,OAAe;AAAE,WAAO,KAAK;AAAA;AAAA,MAE7B,MAAc;AAAE,WAAO,KAAK;AAAA;AAAA,MAE5B,QAAgB;AAAE,WAAO,KAAK,IAAI,KAAK;AAAA;AAAA,MAEvC,SAAiB;AAAE,WAAO,KAAK,IAAI,KAAK;AAAA;AAAA,MAExC,OAAe;AAAE,WAAO,KAAK,QAAQ,KAAK;AAAA;AAAA,MAE1C,UAAiB;AAAE,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK;AAAA;AAAA,MAEpD,WAAkB;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,KAAK;AAAA;AAAA,MAEtD,aAAoB;AAAE,WAAO,IAAI,MAAM,KAAK,MAAM,KAAK;AAAA;AAAA,MAEvD,cAAqB;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,KAAK;AAAA;AAAA,EAE7D,QAAsB;AAC3B,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAC7D,IAAI,CAAC,QAAQ,KAAK,MAAM;AAC3B,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,QAAsB;AAC3B,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU,CAAC,KAAK,GAAG,KAAK,GAAG,KAAK,OAAO,KAAK,QAC7D,IAAI,CAAC,QAAQ,KAAK,MAAM;AAC3B,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,WAAyB;AAC9B,QAAI;AAAA,MACF;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,QACX;AACJ,UAAM,OAAO,KAAK,IAAI,QAAQ;AAC9B,QAAI,QAAQ,QAAQ;AAClB,WAAM,OAAO;AACb,eAAS;AAAA;AAEX,QAAI,SAAS,OAAO;AAClB,WAAM,OAAO;AACb,gBAAU;AAAA;AAGZ,WAAO,IAAI,IAAI,EAAE,GAAG,GAAG,OAAO;AAAA;AAAA,EAGzB,QAAQ,GAAuC;AACpD,UAAM,SAAS,aAAa,KAAM,EAAkB,QAAQ;AAC5D,UAAM,SAAS,aAAa,KAAM,EAAkB,SAAS;AAC7D,WAAO,IAAI,IAAI;AAAA,MACb,GAAG,KAAK,IAAI;AAAA,MACZ,GAAG,KAAK,IAAI;AAAA,MACZ,OAAO,KAAK,QAAQ;AAAA,MACpB,QAAQ,KAAK,SAAS;AAAA;AAAA;AAAA,EAInB,IAAI,MAAc,MAA4B;AACnD,UAAM,CAAC,GAAG,GAAG,OAAO,UAAU;AAAA,MAC5B,KAAK,IAAK,OAAO;AAAA,MACjB,KAAK,IAAK,OAAO;AAAA,MACjB,KAAK,QAAQ;AAAA,MACb,KAAK,SAAS;AAAA;AAEhB,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,mBAAmB,UAAkB,WAAiC;AAC3E,UAAM,EAAE,GAAG,GAAG,OAAO,WAAW;AAChC,UAAM,WAAW,KAAK,IAAI,GAAG;AAC7B,UAAM,WAAW,KAAK,IAAI,GAAG;AAE7B,UAAM,WAAW,QAAQ;AACzB,UAAM,YAAY,SAAS;AAC3B,UAAM,eAAe,KAAK,IAAI,UAAU,WAAW;AACnD,UAAM,gBAAgB,KAAK,IAAI,WAAW,YAAY;AAEtD,WAAQ,IAAI,IAAI;AAAA,MACd,GAAG;AAAA,MAAU,GAAG;AAAA,MAAU,OAAO;AAAA,MAAc,QAAQ;AAAA,OACrD;AAAA;AAAA,EAGC,MAAM,IAAY,IAA0B;AACjD,UAAM,EAAE,OAAO,WAAW;AAC1B,UAAM,IAAI,KAAK,IAAI;AACnB,UAAM,IAAI,KAAK,IAAI;AAEnB,WAAO,IAAI,IAAI;AAAA,MACb;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA;AAAA;AAAA,EAIV,aAAa,aAAqB,YAAoB;AAC3D,UAAM,IAAI,KAAK,QAAQ;AACvB,UAAM,IAAI,KAAK,SAAS;AAExB,UAAM,KAAK;AACX,UAAM,KAAK;AACX,QAAI,MAAM;AACV,QAAI,MAAM;AAEV,QAAI,IAAI,KAAK;AACb,QAAI,IAAI,KAAK;AACb,QAAI,KAAK,KAAK;AACd,QAAI,KAAK,KAAK;AAEd,QAAI,KAAK,YAAY;AACnB,YAAM,CAAC,KAAK,aAAa;AACzB,WAAK;AAAA;AAEP,QAAI,KAAK,aAAa;AACpB,YAAM,CAAC,KAAK,cAAc;AAC1B,WAAK;AAAA;AAEP,QAAI,IAAI,GAAG;AACT,YAAM,IAAI;AACV,UAAI;AAAA;AAEN,QAAI,IAAI,GAAG;AACT,YAAM,IAAI;AACV,UAAI;AAAA;AAGN,WAAO;AAAA,MACL;AAAA,MAAI;AAAA,MAAK;AAAA,MAAI;AAAA,MAAK;AAAA,MAAG;AAAA,MAAI;AAAA,MAAG;AAAA,MAAI;AAAA,MAAG;AAAA;AAAA;AAAA,EAIhC,UAAU,QAAa;AAC5B,WAAO,IAAI,IAAI;AAAA,MACb,MAAM,KAAK,OAAQ,OAAO,OAAO,KAAK;AAAA,MACtC,KAAK,KAAK,MAAO,OAAO,MAAM,KAAK;AAAA,MACnC,OAAO,KAAK,QAAS,OAAO,QAAQ,KAAK;AAAA,MACzC,QAAQ,KAAK,SAAU,OAAO,SAAS,KAAK;AAAA,OAC3C,WAAW;AAAA;AAAA;;;ACjMX,gCAA0B,IAAyC;AAAA,EACxE,YAAY,MAAc,KAAa,OAAe,QAAgB,0BAAmC,OAAO;AAC9G,UAAM;AAAA,MACJ;AAAA,MAAM;AAAA,MAAK;AAAA,MAAO;AAAA,OACjB;AAAA;AAAA;;;ACTA,4BAAsB;AAAA,EAW3B,YACE,OACA,YACA,WACA,aACA,WACA;AACA,SAAK,aAAa,IAAI,WAAW,UAAU,OAAO,UAAU;AAC5D,SAAK,SAAS;AACd,SAAK,cAAc;AACnB,SAAK,aAAa;AAClB,SAAK,OAAO,IAAI,IAAI,aAAa,QAAQ,KAAK;AAAA;AAAA,MAGrC,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEnC,YAAoB;AAAE,WAAO,KAAK;AAAA;AAAA,MAElC,MAAW;AAAE,WAAO,KAAK;AAAA;AAAA,MAEzB,YAAwB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEtC,aAAqB;AAAE,WAAO,KAAK,UAAU;AAAA;AAAA,MAE7C,cAAsB;AAAE,WAAO,KAAK,UAAU;AAAA;AAAA,MAE9C,cAAmB;AAAE,WAAO,IAA
I,IAAI,KAAK,MAAM,QAAQ,KAAK,UAAU;AAAA;AAAA,EAE1E,QAAQ,OAAe,QAAiC;AAC7D,WAAO,IAAI,gBACT,KAAK,OACL,KAAK,YACL,KAAK,WACL,KAAK,aACL,EAAE,OAAO;AAAA;AAAA;;;ACzCR,kCAA4B,gBAAyC;AAAA,EAC1E,YACE,OACA,aACA,WACA;AACA,UAAM,OAAO,OAAO,IAAI,aAAa;AAAA;AAAA,EAGhC,QAAQ,OAAe,QAA+B;AAC3D,UAAM,EAAE,OAAO,aAAa,cAAc,MAAM,QAAQ,OAAO;AAC/D,WAAO,IAAI,cAAc,OAAO,aAAa;AAAA;AAAA;;;ACnB1C,aAAa,MAAW,MAAW,QAAiB,MAAM;AAC/D,QAAM,QAAQ,KAAK,IAAI,GAAK,KAAK,IAAI,KAAK,OAAO,KAAK,SAAS,KAAK,IAAI,KAAK,MAAM,KAAK;AACxF,QAAM,SAAS,KAAK,IAAI,GAAK,KAAK,IAAI,KAAK,QAAQ,KAAK,UAAU,KAAK,IAAI,KAAK,KAAK,KAAK;AAC1F,QAAM,eAAe,QAAQ;AAE7B,SAAO,QACH,eAAgB,MAAK,OAAO,KAAK,OAAO,gBACxC,eAAe,KAAK,IAAI,KAAK,MAAM,KAAK;AAAA;;;ACPvC,iBAAiB,KAA4B;AAClD,QAAM,KAAK,IAAI,IAAI,CAAC,OAAO,GAAG;AAC9B,QAAM,KAAK,IAAI,IAAI,CAAC,OAAO,GAAG;AAC9B,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,IAAI,MAAM,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,IAAI,MAAM,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,MAAM,IAAI,IAAI,KAAM;AACxD,QAAM,OAAO,GAAG,OAAO,CAAC,KAAK,MAAO,MAAM,IAAI,IAAI,KAAM;AAExD,SAAO,IAAI,YAAY,MAAM,MAAM,MAAM;AAAA;;;ACPpC,2BACL,OACA,QACA,cACA,QAAiB,MACP;AACV,MAAI,uBAAuB,OACxB,IAAI,CAAC,OAAO,aAAc,GAAE,OAAO,aACnC,KAAK,CAAC,IAAI,OAAO,GAAG,QAAQ,GAAG,OAC/B,IAAI,CAAC,MAAM,EAAE;AAEhB,QAAM,OAAiB;AAEvB,SAAO,qBAAqB,SAAS,GAAG;AACtC,UAAM,OAAO,qBAAqB;AAClC,SAAK,KAAK;AAEV,UAAM,UAAU;AAEhB,UAAM,UAAoB;AAC1B,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,MAAM,QAAQ;AAEpB,YAAM,UAAU,MAAM;AACtB,YAAM,SAAS,MAAM;AAErB,cAAQ,KAAK,IAAI,SAAS,QAAQ;AAAA;AAGpC,2BAAuB,qBAAqB,OAC1C,CAAC,GAAG,MAAM,QAAQ,MAAM;AAAA;AAI5B,SAAO;AAAA;;;ACnCF,mBAAmB,GAAgB,SAAgC;AACxE,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,CAAC,GAAG,GAAG,KAAK;AAClB,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,QAAQ,AAAG,sBAAK,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,IAAI,IAAI,GAAG;AACtD,UAAM,UAAU,AAAG,wBAAO,CAAC,OAAO,OAAO,QAAQ;AAEjD,WAAO,AAAG,qBAAI,GAAG;AAAA;AAAA;;;ACAd,qBACL,WACA,gBAAyB,OACZ;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,CAAC,QAAQ,SAAS,UAAU,MAAM,MAAM;AAC9C,QAAI,WAAW,OAAO;AACpB,aAAO;AAAA;AAGT,UAAM,UAAU,KAAK,IAAI,SAAS;AAClC,UAAM,gBAAgB,KAAK,MAAM,UAAW,iBAAgB,MAAM;AAClE,UAAM,cAAc,SAAS,QAAQ,IAAI;AAEzC,UAAM,sBAAsB,CAAC,uBAA0C;AACrE,YAAM,qBAAqB,UAAU,MAAM;AAC3C,yBAAmB,eAAe;AAClC,aAAO,AAAG,sBAAK,oBAAoB,GAAG;AAAA;AAGxC,UAAM,sBAAsB,oBAAoB;AAChD,UAAM,yBAAyB,UAAW,oBAAoB,MAAM;AAEpE,UAAM,uBAAuB,iBAAiB,yBAC1C,oBAAoB,0BACpB;AAEJ,UAAM,iBAAiB;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MAEC,OAAO,CAAC,MAAM,CAAC,CAAC,GAChB,IAAI,CAAC,MAAiB,AAAG,sBAAK,GAAG;AACpC,WAAO,AAAG,wBAAO,gBAAgB;AAAA;AAAA;;;AC5C9B,sBAAsB,YAAmB;AAC9C,QAAM,QAAQ,WAAW;AACzB,WAAS,IAAI,MAAM,SAAS,GAAG,IAAI,GAAG,KAAK;AACzC,UAAM,IAAI,KAAK,MAAM,KAAK,WAAY,KAAI;AAC1C,UAAM,IAAI,MAAM;AAChB,UAAM,KAAK,MAAM;AACjB,UAAM,KAAK;AAAA;AAEb,SAAO;AAAA;;;ACDF,iBAAiB,GAAW;AACjC,SAAO,IAAK,KAAI,KAAK,IAAI,CAAC;AAAA;AAGrB,wBAAwB,GAAW;AACxC,SAAO,KAAK,IAAI,IAAK,KAAI;AAAA;;;ACHpB,yBAAmB,IAA2B;AAAA,EACnD,YAAY,GAAW,GAAW,OAAe,QAAgB,0BAAmC,OAAO;AACzG,UAAM;AAAA,MACJ;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,OACZ;AAAA;AAAA;;;ACHP,IAAM,OAAO;AACb,IAAM,OAAO;AACb,IAAM,WAAW;AAOV,0BAA8C;AAAA,EAOnD,YACE,+BACA,SACA,QAAe,IAAI,MAAM,GAAG,IAC5B;AACA,UAAM,EAAE,OAAO,WAAW;AAC1B,SAAK,WAAW,IAAI,WAAW,OAAO;AACtC,SAAK,SAAS;AACd,SAAK,aAAa,8BAA8B,IAC9C,CAAC,OAAO,GAAG,IAAI,IAAI,MAAM,OAAO,SAAS,IAAI;AAAA;AAAA,MAItC,QAAe;AAAE,WAAO,IAAI,MAAM,KAAK,OAAO,GAAG,KAAK,OAAO;AAAA;AAAA,MAE7D,aAAqB;AAAE,WAAO,KAAK,SAAS;AAAA;AAAA,MAE5C,cAAsB;AAAE,WAAO,KAAK,SAAS;AAAA;AAAA,MAE7C,YAAqB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEnC,oBAA6B;AACtC,WAAO,KAAK,WAAW,IACrB,CAAC,OAAO,GAAG,IAAI,KAAK,QAAQ,IAAI,IAAI,MAAM,KAAK,YAAY,K
AAK;AAAA;AAAA,EAI7D,QAAiC,OAAe,QAAmB;AACxE,WAAO,IAAK,KAAK,YACf,KAAK,mBACL,EAAE,OAAO;AAAA;AAAA,EAIN,QAAiC,GAAW,GAAc;AAC/D,WAAO,IAAK,KAAK,YACf,KAAK,mBACL,KAAK,UACL,IAAI,MAAM,GAAG;AAAA;AAAA,EAIV,aAAsC,IAAc;AACzD,WAAO,KAAK,QAAQ,GAAG,GAAG,GAAG;AAAA;AAAA,EAcxB,MACL,WACA,UAAkE,IAC7D;AACL,QAAI,WAAW;AACb,YAAM,MAAM,qBAAqB,gBAC7B,UAAU,IAAI,UACd,IAAI,IAAI;AAEZ,aAAO,KAAK,QAAQ,IAAI,GAAG,IAAI,GAAG,MAAM,MAAM;AAAA;AAGhD,UAAM,EAAE,kBAAkB,kBAAkB,EAAE,kBAAkB,OAAO,eAAe,QAAQ;AAE9F,QAAI,kBAAkB;AACpB,aAAO,KAAK;AAAA;AAGd,WAAO,KAAK,aAAa;AAAA;AAAA,EAGnB,YAAiB;AACvB,UAAM,UAAU,KAAK;AAErB,UAAM,CAAC,eAAe,gBAAgB,eAAe;AACrD,UAAM,cAAc,CAAC,OAAc,YAAY,IAAI,IAAI;AACvD,UAAM,iBAAkB,aAAY,iBAAiB,YAAY,mBAAmB;AAEpF,UAAM,OAAO,KAAK,MAAM,iBAAiB;AAEzC,UAAM,WAAW,eAAe;AAEhC,UAAM,IAAI,KAAK,MAAM,KAAK,IAAI,GAAG,SAAS,IAAK,OAAO;AACtD,UAAM,IAAI,KAAK,MAAM,KAAK,IAAI,GAAG,SAAS,IAAK,OAAO;AAEtD,WAAO,IAAI,KAAK,GAAG,GAAG,KAAK,IAAI,MAAM,KAAK,aAAa,IAAI,KAAK,IAAI,MAAM,KAAK,cAAc;AAAA;AAAA,EAGvF,aAAa,SAAsB;AACzC,UAAM,MAAM,QAAQ,KAAK;AACzB,WAAO,IAAI,IAAI,IAAI,QAAQ,SAAS,IAAI,SAAS;AAAA;AAAA,EAGzC,2BAAoC;AAC5C,UAAM,IAAI,MAAM;AAAA;AAAA;;;AC3Hb,mCAA6B,cAAc;AAAA,EACtC,2BAAoC;AAC5C,UAAM,MAAM,KAAK;AACjB,WAAO;AAAA,MACL,IAAI;AAAA,MACJ,IAAI;AAAA,MACJ,eAAe,CAAC,IAAI,IAAI,IAAI;AAAA;AAAA;AAAA;;;ACN3B,oCAA8B,cAAc;AAAA,EAC1C,gBAAyB;AAC9B,WAAO,KAAK,UAAU,MAAM,GAAG;AAAA;AAAA,EAG1B,iBAA0B;AAC/B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,kBAA2B;AAChC,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,UAAmB;AACxB,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,aAAsB;AAC3B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,cAAuB;AAC5B,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAG3B,WAAoB;AACzB,WAAO,KAAK,UAAU,MAAM,IAAI;AAAA;AAAA,EAGxB,2BAAoC;AAC5C,WAAO;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL,IAAI;AAAA;AAAA;;;AC/BH,sBAAsC;AAAA,EAK3C,YAAY,OAAe,UAAkB;AAC3C,SAAK,SAAS;AACd,SAAK,YAAY;AAAA;AAAA,MAGR,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,WAAmB;AAAE,WAAO,KAAK;AAAA;AAAA,EAErC,SAAS,eAAwB,MAAc;AACpD,WAAO,GAAG,KAAK,QAAQ,eAAe,KAAK,MAAM,KAAK,eAAe;AAAA;AAAA;;;ACjBlE,+BAAyB,IAAgB;AAAA,SAChC,wBAAwB,KAAU,QAAgB;AAC9D,QAAI,iBAAiB,KAAK;AAE1B,QAAI,CAAC,cAAc,IAAI,QAAQ;AAC7B,YAAM,IAAI,MAAM,GAAG,qCAAqC,IAAI;AAAA;AAAA;AAAA,EAMhE,YAAY,KAAiC,OAAe;AAC1D,UAAM;AACN,SAAK,SAAS;AAAA;AAAA,MAGL,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACrBpC,mCAA6B;AAAA,EAKlC,YAAY,OAAe,aAA6B;AACtD,QAAI,CAAE,QAAO,UAAU,WAAW;AAChC,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,CAAC,MAAM,QAAQ,gBAAgB,YAAY,KAAK,CAAC,SAAS,CAAE,iBAAgB,gBAAgB;AAC9F,YAAM,IAAI,MAAM;AAAA;AAGlB,SAAK,SAAS;AACd,SAAK,eAAe;AAAA;AAAA,MAGX,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,cAA8B;AAAE,WAAO,KAAK;AAAA;AAAA,EAEhD,SAAc;AACnB,WAAO;AAAA,MACL,OAAO,KAAK;AAAA,MACZ,aAAa,KAAK,YAAY,IAAI,CAAC,MAAM,MAAM,KAAK;AAAA;AAAA;AAAA,SAI1C,SAAS,MAAmC;AACxD,UAAM,cAAc,KAAK,YAAY,IAAI,CAAC,MAAW,IAAI,aAAa;AACtE,WAAO,IAAI,uBAAuB,KAAK,OAAO;AAAA;AAAA;;;AC1B3C,iCAA2B,WAAW;AAAA,SAC7B,0BAA0B,KAAU,QAAgB;AAChE,eAAW,wBAAwB,KAAK;AAExC,QACE,CAAC,mBAAmB,IAAI,UACrB,CAAC,mBAAmB,IAAI,aAC3B;AACA,YAAM,IAAI,MAAM,GAAG,uCAAuC,IAAI,eAAe,IAAI;AAAA;AAAA;AAAA,EAQrF,YAAY,KAAiC,OAAe,OAAe,YAAoB;AAC7F,UAAM,KAAK;AACX,SAAK,SAAS;AACd,SAAK,cAAc;AAAA;AAAA,MAGV,QAAgB;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9B,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACvBzC,6BAA6B,KAAwC;AAC1E,SAAO,IAAI,qBAAqB;AAAA;AAG3B,iCAA0C,WAAoB,WAAsD;AACzH,QAAM,YAAY,EAAE;AACpB,SAAO,KAAK,cAAc;AAAA;;;ACVrB,4BAAyC;AAC9C,QAAM,QAAQ,OAAO;AACrB,MAAI,CAAC;AAAO,UAAM,IAAI,MAAM;AAE5B,QAAM,WAAW,MAAM;AACrB,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO;AAAA,IACL,QAAQ;AAAA,IACR;AAAA,IACA,OAAO;AAAA,IACP;AAAA,IACA,OAAO;AAAA,IACP,qBAAqB,MAAM,SAAS,cAAc;AAAA,IAClD,oBAAoB,MAAM,SAAS,cAAc;AAAA,IACjD,oBAAoB,MAAM,SAAS,cAAc;AAAA,IACjD;AAAA,IACA;AAAA;AAAA;;;AClBG,0B
AA0B,IAAsB;AACrD,MAAI,iBAAiB;AAErB,MAAI,CAAC,IAAI;AACP,QAAI;AAEF,WAAK,UAAQ;AAAA,aACN,KAAP;AACA,uBAAiB,IAAI;AAAA;AAAA;AAIzB,QAAM,WAAW,KACb,CAAC,aAAqB,IAAI,QAAgB,CAAC,SAAS,WAAW;AAC/D,OAAG,SAAS,UAAU,CAAC,KAAU,WAAoB,MAAM,OAAO,OAAO,QAAQ;AAAA,OAEjF,MAAM;AACN,UAAM,IAAI,MAAM,qEAAqE;AAAA;AAGzF,SAAO;AAAA,IACL;AAAA;AAAA;;;ACnBG,2BAAwC;AAE7C,QAAM,SAAS,OAAO,aAAa,OAAO;AAC1C,QAAM,QAAQ,OAAO,SAAS,OAAO;AAErC,QAAM,QAAQ,OAAO,YAAY,OAAO;AAExC,QAAM,sBAAsB,MAAM;AAChC,QAAI;AAAQ,aAAO,IAAI;AACvB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,qBAAqB,MAAM;AAC/B,QAAI;AAAO,aAAO,IAAI;AACtB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,qBAAqB,MAAM;AAC/B,QAAI;AAAO,aAAO,IAAI;AACtB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,QAAQ,OAAO;AAGrB,QAAM,aAAa;AAEnB,SAAO;AAAA,IACL,QAAQ,UAAU,MAAM;AAAA;AAAA,IACxB,0BAA0B,OAAO,4BAA4B,MAAM;AAAA;AAAA,IACnE,OAAO,SAAS,MAAM;AAAA;AAAA,IACtB,WAAW,OAAO,aAAa,MAAM;AAAA;AAAA,IACrC,OAAO,OAAO,oBAAoB,MAAM;AAAA;AAAA,IACxC;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,OACG;AAAA;AAAA;;;ACzCA,qBAA8B;AACnC,SAAO,OAAO,WAAW,YACpB,OAAO,aAAa,eACpB,OAAO,qBAAqB,eAC5B,OAAO,sBAAsB,eAC7B,OAAO,qBAAqB,eAC5B,OAAO,cAAc,eACrB,OAAO,6BAA6B;AAAA;;;ACPpC,oBAA6B;AAClC,SAAO,OAAO,WAAW,YACpB,OAAO,cAAY,cACnB,OAAO,WAAW,eAClB,OAAO,YAAY,eAAe,CAAC,CAAC,QAAQ;AAAA;;;ACGnD,IAAI;AAEJ,kBAA+B;AAC7B,MAAI,CAAC,aAAa;AAChB,UAAM,IAAI,MAAM;AAAA;AAElB,SAAO;AAAA;AAGT,gBAAgB,MAAkB;AAChC,gBAAc;AAAA;AAGhB,sBAAsB;AAGpB,MAAI;AAAa,WAAO,OAAO;AAC/B,MAAI;AAAY,WAAO,OAAO;AAC9B,SAAO;AAAA;AAGT,qBAAqB,MAA2B;AAC9C,MAAI,CAAC,aAAa;AAChB;AAAA;AAGF,MAAI,CAAC,aAAa;AAChB,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,EAAE,SAAS,YAAY,QAAQ,QAAQ,YAAY,UAAU;AACnE,cAAY,SAAS;AACrB,cAAY,QAAQ;AACpB,cAAY,sBAAsB,KAAI,uBAAwB,OAAM,IAAI;AACxE,cAAY,qBAAqB,KAAI,sBAAuB,OAAM,IAAI;AAEtE,cAAY,YAAY,KAAI,aAAa,YAAY;AACrD,cAAY,QAAQ,KAAI,SAAS,YAAY;AAC7C,cAAY,QAAQ,KAAI,SAAS,YAAY;AAC7C,cAAY,WAAW,KAAI,YAAY,YAAY;AAAA;AAG9C,IAAM,MAAM;AAAA,EACjB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAGF;;;AC3DO,sBAAsB,KAAmB;AAC9C,MAAI,CAAC,IAAI,cAAc,OAAO,QAAQ,UAAU;AAC9C,WAAO,SAAS,eAAe;AAAA;AAEjC,SAAO;AAAA;;;ACHF,6BAA6B,WAA4F;AAC9H,QAAM,EAAE,QAAQ,wDAA6B,IAAI;AAEjD,MAAI,qBAAqB,2BAA0B;AACjD,WAAO;AAAA;AAGT,QAAM,SAAS,aAAa;AAE5B,MAAI,CAAE,mBAAkB,SAAS;AAC/B,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,MAAM,OAAO,WAAW;AAC9B,MAAI,CAAC,KAAK;AACR,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO;AAAA;;;ACfF,IAAK;AAAL,UAAK,iBAAL;AAEL,gCAAW;AAEX,iCAAY;AAEZ,mCAAc;AAEd,oCAAe;AAAA,GARL;AAoBL,iCAA4D;AAAA,EAajE,YAAY,UAAiC,IAAI;AAC/C,UAAM;AAAA,MACJ;AAAA,MAAgB;AAAA,MAAiB;AAAA,MAAW;AAAA,MAAU;AAAA,MAAW;AAAA,QAC/D;AACJ,SAAK,iBAAiB,kBAAkB,eAAe;AACvD,SAAK,kBAAkB,mBAAmB;AAC1C,SAAK,YAAY,aAAa;AAC9B,SAAK,WAAW,YAAY;AAC5B,SAAK,YAAY,aAAa;AAC9B,SAAK,UAAU,WAAW;AAAA;AAAA;AAIvB,0BAAoB;AAAA,EAOzB,YACE,MACA,QACA,UAAiC,IACjC;AAEA,SAAK,OAAO,OAAO,SAAS,WACxB,CAAC,QACA,gBAAgB,gBAAgB,KAAK,OAAO;AACjD,SAAK,SAAS;AACd,SAAK,UAAU,IAAI,qBAAqB;AAAA;AAAA,EAG1C,aAAa,KAAuC;AAClD,UAAM,EAAE,YAAY,KAAK;AACzB,WAAO,KAAK,KAAK,IAAI,CAAC,MAAM,IAAI,YAAY,GAAG,OAAO,OAAO,CAAC,IAAI,OAAQ,KAAK,KAAK,KAAK,IAAK,KAAM,IAAI;AAAA;AAAA,EAG1G,gBAAwB;AACtB,UAAM,EAAE,UAAU,YAAY,KAAK;AACnC,WAAO,KAAK,KAAK,SAAS,WAAY,IAAI;AAAA;AAAA,EAG5C,aAAa,KAA+B,YAAkC;AAC5E,UAAM,EAAE,mBAAmB,KAAK;AAChC,UAAM,cAAc,mBAAmB,eAAe,gBAAgB,mBAAmB,eAAe;AACxG,UAAM,aAAa,mBAAmB,eAAe,eAAe,mBAAmB,eAAe;AAEtG,UAAM,iBAAiB,KAAK,aAAa;AACzC,UAAM,kBAAkB,KAAK;AAC7B,UAAM,IAAK,cAAc,KAAK,OAAO,IAAI,iBAAiB,KAAK,OAAO;AACtE,UAAM,IAAI,aAAa,KAAK,OAAO,IAAI,kBAAkB,KAAK,OAAO;AAGrE,QAAI,YAAY;AACd,YAAM,EAAE,OAAO,WAAW;AAC1B,YAAM,OAAO,KAAK,IAAI,KAAK,IAAI,GAAG,QAAQ,iBAAiB;AAC3D,YAAM,OAAO,KAAK,IAAI,KAAK,IAAI,GAAG,SAAS,kBAAkB;AAC7D,aAAO,EAAE,GAAG,MAAM,GAAG;AAAA;AAEvB,WAAO,EAAE,GAAG;AAAA;AAAA,EAGd,KAAK,WAAkE;AACrE,U
AAM,SAAS,aAAa;AAC5B,UAAM,MAAM,oBAAoB;AAEhC,UAAM;AAAA,MACJ;AAAA,MAAiB;AAAA,MAAW;AAAA,MAAU;AAAA,MAAW;AAAA,QAC/C,KAAK;AAET,QAAI,OAAO,GAAG,cAAc;AAC5B,UAAM,eAAe,KAAK,aAAa;AACvC,UAAM,aAAa,KAAK;AAExB,QAAI,YAAY;AAChB,UAAM,YAAY,KAAK,aAAa,KAAK;AACzC,QAAI,SAAS,UAAU,GAAG,UAAU,GAAG,cAAc;AAErD,QAAI,YAAY;AAChB,SAAK,KAAK,QAAQ,CAAC,UAAU,MAAM;AACjC,YAAM,IAAI,UAAU,UAAU;AAC9B,YAAM,IAAI,UAAU,UAAU,IAAM,KAAI,KAAK;AAC7C,UAAI,SAAS,UAAU,GAAG;AAAA;AAAA;AAAA;;;AC9GzB,2BAAqB;AAAA,EAS1B,YAAY,UAA2B,IAAI;AACzC,UAAM;AAAA,MACJ;AAAA,MAAU;AAAA,MAAW;AAAA,MAAO;AAAA,QAC1B;AACJ,SAAK,WAAW,YAAY;AAC5B,SAAK,YAAY,aAAa;AAC9B,SAAK,QAAQ;AAEb,UAAM,0BAA0B;AAAA,MAC9B,gBAAgB,eAAe;AAAA,MAC/B,iBAAiB,KAAK;AAAA;AAExB,SAAK,mBAAmB,IAAI,qBAAqB,KAAK,4BAA4B;AAAA;AAAA;AAI/E,oBAAc;AAAA,EAKnB,YACE,KACA,UAA2B,IAC3B;AACA,SAAK,MAAM,IAAI,IAAI;AACnB,SAAK,UAAU,IAAI,eAAe;AAAA;AAAA,EAGpC,KAAK,WAAkE;AACrE,UAAM,MAAM,oBAAoB;AAEhC,UAAM,EAAE,UAAU,cAAc,KAAK;AAErC,UAAM;AAAA,MACJ;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,QACX,KAAK;AACT,QAAI,cAAc;AAClB,QAAI,YAAY;AAChB,QAAI,WAAW,GAAG,GAAG,OAAO;AAE5B,UAAM,EAAE,UAAU,KAAK;AACvB,QAAI,OAAO;AACT,UAAI,cAAc,CAAC,QAAQ,EAAE,GAAG,IAAK,YAAY,GAAI,KAAK,KAAK,QAAQ,kBAAkB,KAAK;AAAA;AAAA;AAAA;;;ACxD7F,wBACL,WACA,YACA;AACA,QAAM,kBAAkB,MAAM,QAAQ,cAAc,aAAa,CAAC;AAElE,kBAAgB,QAAQ,CAAC,QAAQ;AAE/B,UAAM,QAAQ,eAAe,gBACzB,IAAI,QACH,oBAAoB,OAAO,IAAI,UAAU,QAAQ;AAGtD,UAAM,MAAM,eAAe,gBACvB,IAAI,MACH,oBAAoB,OAAO,IAAI,UAAU,MAAM,IAAI,IAAI;AAE5D,UAAM,QAAQ,QAAQ,GAAG,MAAM,WAAW;AAC1C,QAAI,QAAQ,KAAK,EAAE,SAAS,KAAK;AAAA;AAAA;;;ACxB9B,uBAAuB,OAAsD;AAClF,QAAM,EAAE,OAAO,UAAU,IAAI;AAE7B,SAAQ,iBAAiB,SAAS,MAAM,YAClC,iBAAiB,SAAS,MAAM,cAAc;AAAA;;;ACH/C,0BAA0B,OAAgE;AAE/F,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,QAAI,iBAAiB,IAAI,SAAS,UAAU,cAAc;AAAQ,aAAO,QAAQ;AAEjF,qBAAiB,GAAU;AACzB,UAAI,CAAC,EAAE;AAAe;AAEtB,QAAE,cAAc,oBAAoB,QAAQ;AAC5C,QAAE,cAAc,oBAAoB,SAAS;AAC7C,aAAO;AAAA;AAGT,oBAAgB,GAAU;AACxB,UAAI,CAAC,EAAE;AAAe;AACtB,QAAE,cAAc,oBAAoB,QAAQ;AAC5C,QAAE,cAAc,oBAAoB,SAAS;AAC7C,cAAQ;AAAA;AAGV,UAAM,iBAAiB,QAAQ;AAC/B,UAAM,iBAAiB,SAAS;AAAA;AAAA;;;ACtB7B,uBAAuB,KAAsC;AAClE,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,QAAI,CAAE,gBAAe;AAAO,aAAO,IAAI,MAAM;AAC7C,UAAM,SAAS,IAAI;AACnB,WAAO,SAAS,MAAM;AACpB,UAAI,OAAO,OAAO,WAAW;AAAU,eAAO,IAAI,MAAM;AACxD,YAAM,MAAM,IAAI,SAAS;AACzB,UAAI,SAAS,MAAM,QAAQ;AAC3B,UAAI,UAAU;AACd,UAAI,MAAM,OAAO;AAAA;AAEnB,WAAO,UAAU;AACjB,WAAO,cAAc;AAAA;AAAA;;;ACXlB,4BAA4B,OAA0F;AAC3H,QAAM,EAAE,OAAO,UAAU,IAAI;AAE7B,MAAI,iBAAiB,OAAO;AAC1B,WAAO,IAAI,WAAW,MAAM,cAAc,MAAM;AAAA;AAElD,MAAI,iBAAiB,OAAO;AAC1B,WAAO,IAAI,WAAW,MAAM,YAAY,MAAM;AAAA;AAEhD,SAAO,IAAI,WAAW,MAAM,OAAO,MAAM;AAAA;;;ACNpC,sBAAsB,EAAE,OAAO,UAA0C;AAC9E,QAAM,EAAE,wBAAwB,IAAI;AACpC,QAAM,SAAS;AACf,SAAO,QAAQ;AACf,SAAO,SAAS;AAChB,SAAO;AAAA;AAGF,+BAA+B,OAAwD,MAAuC;AACnI,QAAM,EAAE,0BAAc,IAAI;AAE1B,MAAI,CAAE,kBAAiB,eAAc,CAAC,cAAc,QAAQ;AAC1D,UAAM,IAAI,MAAM;AAAA;AAGlB,QAAM,EAAE,OAAO,WAAW,QAAQ,mBAAmB;AACrD,QAAM,SAAS,aAAa,EAAE,OAAO;AAErC,MAAI,iBAAiB,YAAW;AAC9B,wBAAoB,QAAQ,aAAa,OAAO,GAAG;AAAA,SAC9C;AACL,wBAAoB,QAAQ,UAAU,OAAO,GAAG,GAAG,OAAO;AAAA;AAE5D,SAAO;AAAA;;;ACxBT,mCACE,WACA,QAC4B;AAC5B,QAAM,eAAe,UAAU,IAAI,SAAS;AAE5C,QAAM,CAAC,QAAQ,OAAO,eAAe,UAAU,MAAM,MAAM,WAAW,aAAa,IAAI;AACvF,QAAM,cAAc,AAAG,sBAAK,MAAM,UAAU,KAAK,QAAQ,OAAO,aAAa;AAC7E,QAAM,AAAG,yBAAQ,SAAS,aAAa;AAEvC,cAAY;AAEZ,SAAO;AAAA;;;ACfF,wBAAwB,OAAY;AACzC,QAAM,EAAE,OAAO,QAAQ,UAAU,IAAI;AAErC,SAAO,iBAAiB,SACnB,iBAAiB,UACjB,iBAAiB;AAAA;;;ACFjB,uBAAuB,OAA6C,WAAmB,cAAuB,OAAO;AAC1H,QAAM,EAAE,OAAO,WAAW,IAAI;AAE9B,MAAI,CAAE,kBAAiB,SAAS,iBAAiB,SAAS;AACxD,UAAM,IAAI,MAAM;AAAA;AAGlB,MAAI,aAAa;AAAG,WAAO,aAAa,EAAE,OAAO,GAAG,QAAQ;AAC5D,QAAM,OAAO,mBAAmB;AAChC,QAAM,SAAQ,YAAY,KAAK,IAAI,KAAK,QAAQ,KA
AK;AACrD,QAAM,QAAQ,SAAQ,KAAK;AAC3B,QAAM,SAAS,SAAQ,KAAK;AAE5B,QAAM,eAAe,aAAa,EAAE,OAAO,WAAW,QAAQ;AAC9D,QAAM,cAAc,iBAAiB,SAAS,QAAQ,sBAAsB;AAE5E,QAAM,SAAS,KAAK,IAAI,QAAQ,UAAU;AAC1C,QAAM,KAAK,eAAe,QAAQ,SAAS,SAAS;AACpD,QAAM,KAAK,eAAe,SAAS,QAAQ,SAAS;AACpD,MAAI,YAAY,QAAQ,KAAK,YAAY,SAAS;AAAG,wBAAoB,cAAc,UAAU,aAAa,IAAI,IAAI,OAAO;AAE7H,SAAO;AAAA;;;AChBF,qBAAe;AAAA,EAapB,YAAY,QAAkC,oBAA6B,OAAO;AAZ1E,yBAAkD;AAElD,qBAAiC;AAIjC,8BAA8B;AAE9B,4BAA+B;AAKrC,QAAI,CAAC,MAAM,QAAQ,SAAS;AAC1B,YAAM,IAAI,MAAM,4HAA4H;AAAA;AAG9I,SAAK,qBAAqB;AAC1B,SAAK,aAAa,OAAO;AAEzB,WAAO,QAAQ,CAAC,OAAO,QAAQ;AAC7B,UAAI,WAAW,QAAQ;AACrB,aAAK,cAAc,OAAO;AAC1B,aAAK,iBAAiB,OAAO,MAAM;AACnC;AAAA;AAGF,UAAI,WAAW,QAAQ;AACrB,cAAM,YAAa,MAAc,MAAM;AACvC,YAAI,cAAc,GAAG;AACnB,gBAAM,IAAI,MAAM,yCAAyC;AAAA;AAG3D,aAAK,cAAc,OAAO;AAC1B,aAAK,iBAAiB,OAAQ,MAAc,MAAM,MAAM;AACxD;AAAA;AAGF,YAAM,SAAU,iBAAyB,IAAI,SAAS,SAAS,QAAQ,sBAAsB;AAC7F,WAAK,UAAU,OAAO;AACtB,WAAK,iBAAiB,OAAO,CAAC,OAAO,QAAQ,OAAO,OAAO;AAAA;AAAA;AAAA,MAIpD,eAAiD;AAC1D,WAAO,KAAK;AAAA;AAAA,MAGH,WAAgC;AACzC,WAAO,KAAK;AAAA;AAAA,MAGH,eAAwB;AACjC,WAAO,KAAK,YAAY,KAAK,KAAK;AAAA;AAAA,MAGzB,YAAoB;AAC7B,WAAO,KAAK;AAAA;AAAA,MAGH,kBAA8B;AACvC,WAAO,KAAK;AAAA;AAAA,MAGH,YAAgC;AACzC,WAAO,KAAK;AAAA;AAAA,MAGH,0BAAwC;AACjD,WAAO,MAAM,KAAK,WAAW,GAAG,GAAG,IACjC,CAAC,GAAG,aAAa,KAAK,2BAA2B;AAAA;AAAA,EAI9C,SAAS,UAAiE;AAC/E,WAAO,KAAK,SAAS,aAAa,KAAK,aAAa;AAAA;AAAA,EAG/C,mBAAmB,UAA4B;AACpD,WAAO,KAAK,iBAAiB;AAAA;AAAA,EAGxB,eAAe,UAA0B;AAC9C,WAAO,KAAK,iBAAiB,UAAU;AAAA;AAAA,EAGlC,cAAc,UAA0B;AAC7C,WAAO,KAAK,iBAAiB,UAAU;AAAA;AAAA,EAGlC,2BAA2B,UAA8B;AAC9D,QAAI,OAAO,KAAK,cAAc,UAAU;AACtC,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,QAAQ,KAAK,cAAc;AACjC,UAAM,SAAS,KAAK,eAAe;AACnC,WAAO,0BAA0B,EAAE,OAAO,UAAU,KAAK;AAAA;AAAA,EAYpD,cAAc,WAAmB,iBAA0B,MAAmB;AACnF,SAAK,aAAa;AAElB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,eAAe,MAAM,KAAK,WAAW,GAAG,GAAG,IAAI,CAAC,aAAa;AACjE,cAAM,QAAQ,KAAK,SAAS;AAE5B,YAAI,iBAAoB,yBAAQ;AAC9B,cAAI,YAAY,WAAW,SAAS,QAAQ,AAAG,4BAAW;AAC1D,sBAAY,YAAY,WAAW;AAEnC,cAAI,UAAU,MAAM,OAAO,aAAa,UAAU,MAAM,OAAO,WAAW;AACxE,wBAAY,AAAG,uBAAM,eAAe,WAAW,CAAC,WAAW,YAAY,OAAO;AAAA;AAGhF,iBAAO,UAAU,KAAK,WAAW,WAAW;AAAA;AAG9C,YAAI,iBAAiB,IAAI,SAAS,QAAQ;AACxC,iBAAO,AAAG,yBAAQ,WAAW,cAAc,OAAO,WAAW;AAAA;AAG/D,cAAM,IAAI,MAAM,+BAA+B,qGAAqG;AAAA;AAGtJ,YAAM,cAAc,AAAG,uBAAM,aAAa,IAAI,CAAC,MAAM,AAAG,sBAAK,GAAG,aAAa,KAAK,KAAK,WAAW,WAAW,WAAW;AAExH,aAAO;AAAA;AAAA;AAAA;;;ACrIb,0BAAiC,QAAsC;AACrE,MAAI,kBAAkB;AAAU,WAAO;AACvC,QAAM,gBAAgB,MAAM,QAAQ,UAAU,SAAS,CAAC;AACxD,MAAI,CAAC,cAAc;AAAQ,UAAM,IAAI,MAAM;AAC3C,QAAM,aAAa,CAAC,QAAiB,MAAM,QAAQ,UAAU,mBAAmB,SAAS;AACzF,QAAM,aAAa,cAAc,IAAI;AACrC,aAAW,QAAQ,CAAC,OAAO,MAAM;AAC/B,QAAI,CAAC,eAAe,UAAU,CAAC,WAAW,UAAU,CAAC,WAAW,QAAQ;AACtE,UAAI,OAAO,cAAc,OAAO;AAAU,cAAM,IAAI,MAAM,eAAe,WAAW,sEAAsE,cAAc;AACxK,YAAM,IAAI,MAAM,eAAe,WAAW;AAAA;AAE5C,QAAI,WAAW,QAAQ;AAErB,YAAM,YAAY,MAAM,MAAM;AAC9B,UAAI,cAAc;AAAG,cAAM,IAAI,MAAM,eAAe,WAAW,iCAAiC;AAAA;AAAA;AAIpG,QAAM,QAAQ,IAAI,WAAW,IAAI,CAAC,UAAU,eAAe,UAAU,iBAAiB;AACtF,SAAO,IAAI,SAAS,YAAY,MAAM,QAAQ;AAAA;;;ACjBhD,4BAAmC,OAAkB,YAAuE;AAC1H,QAAM,EAAE,WAAW,IAAI;AACvB,MAAI,SAAS;AACb,MAAI,CAAE,kBAAiB,SAAS;AAC9B,UAAM,WAAW,MAAM,WAAW;AAClC,QAAI,SAAS,YAAY;AAAG,YAAM,IAAI,MAAM;AAC5C,UAAM,iBAAiB,SAAS,SAAS;AACzC,aAAS,0BAA0B,SAAS,iBAAiB,MAAM,oBAAoB;AAAA;AAEzF,QAAM,MAAM,oBAAoB;AAChC,QAAM,QAAQ,WACX,IAAI,CAAC,QAAS,eAAe,gBAAgB,IAAI,QAAQ,OAAO,OAAO,OAAO,QAAQ,IAAI,UAAU,KACpG,IAAI,CAAC,QAAQ,IAAI,mBAAmB,OAAO,OAAO,OAAO;AAC5D,SAAO,MAAM,IAAI,CAAC,EAAE,GAAG,GAAG,OAAO,aAAa;AAC5C,UAAM,UAAU,aAAa,EAAE,OAAO;AACtC,QAAI,QAAQ,KAAK,SAAS;AAAG,0BAAoB,SAAS,aAAa,IAAI,aAAa,GAAG,GAAG,OAAO,SAAS,GAAG;AACjH,WAAO;AAAA;AAAA;;;AChBX,kCAAyC,aAAwC,YAAiE;AAChJ,MAAI,CA
AC,WAAW,gBAAgB,CAAC,WAAW,cAAc;AACxD,UAAM,IAAI,MAAM;AAAA;AAGlB,MAAI,WAAW,gBAAgB,YAAY,MAAM,KAAK,GAAG;AACvD,UAAM,IAAI,MAAM;AAAA;AAGlB,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,CAAC,WAAW,UAAU,eAAe,YAAY,MAAM,MAAM,WAAW,eAAe,IAAI;AAEjG,UAAM,QAAQ,WACX,IAAI,CAAC,QAAS,eAAe,gBAC1B,IAAI,QAAQ,UAAU,WAAW,MACjC,KACH,IAAI,CAAC,QAAQ,IAAI,mBAAmB,UAAU;AAEjD,UAAM,cAAc,MAAM,IAAI,CAAC;AAAA,MAC7B;AAAA,MAAG;AAAA,MAAG;AAAA,MAAO;AAAA,UACT,AAAG,yBAAQ,YAAY,KAAK,WAAW,UAAU,cAAc,CAAC,GAAG,GAAG,IAAI,CAAC,QAAQ,OAAO;AAEhG,WAAO;AAAA;AAAA;;;ACpCX,4BACE,KAEA,MACmB;AACnB,QAAM,EAAE,UAAU,IAAI;AACtB,QAAM,MAAM,MAAM,MAAM,KAAK;AAC7B,MAAI,CAAE,KAAI,SAAS,MAAM;AACvB,UAAM,IAAI,MAAM,qBAAqB,IAAI,WAAW,IAAI,yBAAyB,IAAI;AAAA;AAEvF,SAAO;AAAA;;;ACTT,0BAAiC,KAAwC;AACvE,QAAM,MAAM,MAAM,aAAa;AAC/B,QAAM,OAAO,MAAO,IAAK;AAEzB,MAAI,CAAC,KAAK,KAAK,WAAW,WAAW;AACnC,UAAM,IAAI,MAAM,wEAAwE,KAAK,kBAAkB,IAAI;AAAA;AAErH,SAAO,cAAc;AAAA;;;ACRvB,yBAAmC,KAAyB;AAC1D,SAAQ,OAAM,aAAa,MAAM;AAAA;;;ACDnC,+BAAsC,KAAoC;AACxE,SAAO,IAAI,aAAa,MAAO,OAAM,aAAa,MAAM;AAAA;;;ACDnD,uBAAuB,KAAsC;AAClE,SAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,QAAI,CAAE,gBAAe;AAAO,aAAO,IAAI,MAAM;AAE7C,UAAM,QAAQ,IAAI,SAAS;AAC3B,UAAM,YAAY,MAAM,QAAQ;AAChC,UAAM,UAAU;AAEhB,UAAM,cAAc;AACpB,UAAM,WAAW;AACjB,UAAM,QAAQ;AACd,UAAM,MAAM,IAAI,gBAAgB;AAAA;AAAA;;;ACVpC,0BAAiC,KAAwC;AACvE,QAAM,MAAM,MAAM,aAAa;AAC/B,QAAM,OAAO,MAAO,IAAK;AAEzB,MAAI,CAAC,KAAK,KAAK,WAAW,WAAW;AACnC,UAAM,IAAI,MAAM,wEAAwE,KAAK,kBAAkB,IAAI;AAAA;AAErH,SAAO,cAAc;AAAA;;;ACVhB,sBAAsB,KAAyB,kBAA0B;AAC9E,QAAM,0BAA0B,GAAG;AAEnC,MAAI,CAAC,KAAK;AACR,WAAO;AAAA,MACL,cAAc;AAAA,MACd,aAAa;AAAA;AAAA;AAIjB,MAAI,QAAQ,KAAK;AACf,WAAO;AAAA,MACL,cAAc;AAAA,MACd,aAAa,IAAI;AAAA;AAAA;AAIrB,QAAM,WAAW,IAAI,WAAW,aAAa,YAAY,IAAI,WAAW,cAAc,aAAa;AACnG,QAAM,IAAI,QAAQ,UAAU;AAE5B,QAAM,QAAQ,IAAI,MAAM,KAAK,OAAO,CAAC,MAAM;AAE3C,QAAM,eAAe,IAAI,SAAS,WAC9B,MAAM,MAAM,SAAS,KACrB;AAEJ,MAAI,eAAe,WAAY,KAAI,SAAS,WAAW,MAAM,MAAM,GAAG,MAAM,SAAS,KAAK,OAAO,KAAK;AACtG,iBAAe,IAAI,WAAW,OAAO,IAAI,iBAAiB;AAE1D,SAAO;AAAA,IACL;AAAA,IACA,aAAa,iBAAiB,MAAM,IAAI,iBAAiB,GAAG,gBAAgB;AAAA;AAAA;;;AC1BhF,6BACE,KACA,kBAC4B;AAC5B,QAAM,EAAE,aAAa,iBAAiB,aAAa,KAAK;AACxD,QAAM,WAAW,MAAM,UAAuC;AAE9D,SAAO,AAAG,oBAAG,YAAY,UAAU;AAAA;;;ACT9B,yBAAyB,OAAoB,WAAwB,qBAA8B,OAAO;AAC/G,QAAM,EAAE,OAAO,WAAW,qBACtB,mBAAmB,aACnB;AACJ,QAAM,QAAQ;AACd,QAAM,SAAS;AACf,SAAO,EAAE,OAAO;AAAA;;;ACFX,0BAAyC;AAAA,EAC9C,YAAY,MAAc;AAIhB,mBAAkC;AAElC,0BAAiC;AALzC,SAAK,QAAQ;AAAA;AAAA,MASJ,SAAiC;AAAE,WAAO,KAAK;AAAA;AAAA,MAE/C,gBAAgC;AAAE,WAAO,KAAK;AAAA;AAAA,MAE9C,WAAoB;AAAE,WAAO,CAAC,CAAC,KAAK;AAAA;AAAA,EAExC,iBAAiB,WAA8B;AACpD,UAAM,EAAE,KAAK,YAAY,KAAK,qBAAqB;AACnD,WAAO,IAAI;AAAA;AAAA,EAGN,sBAAsB,WAAmB,SAAmB;AACjE,UAAM,EAAE,KAAK,YAAY,KAAK,qBAAqB;AACnD,QAAI,SAAS;AACb,QAAI,WAAW;AAAA;AAAA,EAGV,eAAe;AACpB,WAAO,KAAK,eAAe,IAAI,CAAC,EAAE,gBAAiB;AAAA,MACjD,MAAM;AAAA,MACN,QAAQ,KAAK,iBAAiB;AAAA;AAAA;AAAA,EAI3B,qBAAqB;AAC1B,WAAO,KAAK,eAAe,OAAO,CAAC,UAAU,MAAM,kBAAqB;AAAA;AAAA,EAGnE,kBAAkB;AACvB,WAAO,KAAK,eAAe,OAAO,CAAC,UAAU,CAAE,OAAM,kBAAqB;AAAA;AAAA,EAGrE,WAAW;AAChB,SAAK,kBAAkB,QAAQ,CAAC,EAAE,MAAM,sBAAa;AACnD,WAAK,sBAAsB,MAAM,QAAO;AAAA;AAAA;AAAA,EAIrC,SAAS;AACd,SAAK,qBAAqB,QAAQ,CAAC,EAAE,MAAM,QAAQ,eAAe;AAChE,YAAM,UAAS,AAAG,wBAAO,SAAS;AAClC,eAAS;AACT,WAAK,sBAAsB,MAAM;AAAA;AAAA;AAAA,EAI9B,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,eAAe,QAAQ,CAAC,UAAU;AACrC,UAAI,oBAAoB,MAAM,OAAO,YAAY;AAC/C,cAAM,IAAI,MAAM,mDAAmD,MAAM;AAAA;AAE3E,YAAM,OAAO;AAAA;AAEf,SAAK,UAAU;AAAA;AAAA,EAGV,kBAAgC;AACrC,WAAO,IAAI,aACT,KAAK,eACF,IAAI,CAAC,EAAE,sBAAa,MAAM,KAAK,QAAO,aACtC,OAAO,CAAC,MAAM,QAAQ,KAAK,OAAO;AAAA;AAAA,QAI5B,KAAK,cAAgE;AAChF,QAAI,wBAAwB,cAAc;AACxC,WAAK,eAAe;AACpB;AAAA;AAEF,UAAM,KAAK,YAAY;AAAA;AAAA,QAGZ,YAAY,KAA
yB;AAChD,QAAI,OAAO,OAAO,QAAQ,UAAU;AAClC,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAE1B,UAAM,YAAY,MAAM,cAAc,KAAK,KAAK;AAChD,SAAK,kBAAkB;AAAA;AAAA,QAGZ,aAAa,UAA8B;AACtD,QAAI,YAAY,OAAO,aAAa,UAAU;AAC5C,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAE1B,UAAM,EAAE,aAAa,IAAI;AACzB,UAAM,EAAE,aAAa,iBAAiB,aAAa,UAAU,KAAK;AAClE,UAAM,uBAAuB,CAAC,cAAwB,QAAQ,IAAI,UAAU,IAAI,CAAC,OAAO,SAAS,IAAI,KAAK,CAAC,QAAQ,IAAI;AACvH,UAAM,cAAc,AAAG,oBAAG,qBAAqB;AAC/C,UAAM,WAAW,KAAK,MAAO,OAAM,SAAS,cAAc;AAC1D,UAAM,YAAY,MAAM,YAAY,UAAU;AAC9C,SAAK,kBAAkB;AAAA;AAAA,EAGlB,kBAAkB,WAA8B;AACrD,UAAM,EAAE,eAAe,WAAW,KAAK,2BAA2B;AAClE,SAAK,iBAAiB;AACtB,SAAK,UAAU;AAAA;AAAA,EAGV,eAAe,SAAuB;AAC3C,UAAM,EAAE,eAAe,WAAW,KAAK,cAAc;AACrD,SAAK,iBAAiB;AACtB,SAAK,UAAU;AAAA;AAAA,EAGT,qBAAqB,WAAmB;AAC9C,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,SAAS,UAAU,MAAM,KAAK,OAAO,CAAC,KAAoD,aAAY;AAE1G,UAAI,CAAC,IAAI,QAAQ,eAAe,WAAU;AACxC,cAAM,IAAI,MAAM,wDAAwD,sBAAqB;AAAA;AAE/F,aAAO,EAAE,KAAK,IAAI,SAAS,mBAAS,SAAS,IAAI,QAAQ;AAAA,OACxD,EAAE,SAAS,KAAK;AAEnB,UAAM,EAAE,KAAK,YAAY;AACzB,QAAI,CAAC,OAAO,CAAC,WAAW,CAAE,KAAI,oBAAuB,0BAAS;AAC5D,YAAM,IAAI,MAAM,8DAA8D;AAAA;AAGhF,WAAO,EAAE,KAAK;AAAA;AAAA;;;ACzIX,gCACL,GACA,QACA,QACa;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,iCAAgB,GAAG,OAAO,kBAAkB,OAAO,kBAAkB,QAAQ;AAC1F,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO;AAAA;AAAA;;;ACNJ,qBACL,GACA,kBACA,eAAwB,OACX;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,OAAO,AAAG,sBACd,eACI,AAAG,qBACH,AAAG,wBAAO,GAAI,iBAAiB,MAAqB,SAAS,CAAC,GAAG,IAAI,SACrE,iBAAiB,MAAM,QAEvB,uBAAuB,GAAG,iBAAiB,OAA8B,CAAC,GAAG;AAEnF,UAAM,OAAO,uBAAuB,MAAM,iBAAiB,OAAO,CAAC,GAAG;AAEtE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM;AACjC,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,WAAO,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAAA;AAAA;AAItC,qBACL,GACA,kBACA,eAAwB,OACxB,cAAuB,MACV;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,OAAO,AAAG,sBACd,eACI,AAAG,qBACH,AAAG,wBAAO,GAAI,iBAAiB,MAAqB,SAAS,cAAc,CAAC,GAAG,KAAK,CAAC,GAAG,IAAI,SAC5F,iBAAiB,MAAM,QAEvB,uBAAuB,GAAG,iBAAiB,OAA8B,cAAc,CAAC,GAAG,KAAK,CAAC,GAAG;AAE1G,UAAM,OAAO,uBAAuB,MAAM,iBAAiB,OAAO,CAAC,GAAG;AAEtE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM;AACjC,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,UAAM,MAAM,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAC9C,UAAM,OAAO,uBAAuB,KAAK,iBAAiB,OAAO,CAAC,GAAG;AAErE,WAAO,AAAG,sBAAK,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM,AAAG,qBAAI,MAAM;AAAA;AAAA;;;AChDnD,mBACL,GACA,QACA,UAA4B,QAC5B,WAAoB,OACP;AACb,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,MAAM,AAAG,qBACb,AAAG,wBAAO,GAAG,OAAO,SAAS,CAAC,GAAG,IAAI,UACrC,OAAO;AAGT,WAAO,WAAW,AAAG,sBAAK,OAAO;AAAA;AAAA;;;ACd9B,oCAAoC,WAAgB,eAA+B;AACxF,SAAO,KAAK,WAAW,QAAQ,CAAC,SAAS;AACvC,QAAI,CAAC,cAAc,KAAK,CAAC,OAAO,GAAG,iBAAiB,OAAO;AACzD,gBAAU,MAAM;AAAA;AAAA;AAAA;;;ACDf,kCACL,gBACA,eACA;AACA,SAAO,CACL,YACA,aACA,YACA,iBACe;AACf,UAAM,UAAU,AAAG,0BACjB,eAAe,aAAa,cAAc,aAAa,aACvD,CAAC,YAAY,YAAY,YAAY;AAEvC,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG;AAGlB,WAAO,EAAE,SAAS;AAAA;AAAA;;;ACrBf,gCACL,gBACA,eACA;AACA,SAAO,CACL,YACA,aACA,iBACa;AACb,UAAM,aAAa,AAAG,0BAAS,eAAe,aAAa,cAAc,CAAC,YAAY;AACtF,UAAM,UAAU,AAAG,0BAAS,eAAe;AAE3C,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL,SAAS;AAAA,MACT,MAAM;AAAA;AAAA;AAAA;;;ACHL,gCAA0B;AAAA,EAE/B,YAES,kBAEA,kBAEA,MAEP;AANO;AAEA;AAEA;AAAA;AAAA;;;ACxBJ,2CACL,gBACA,eACA;AACA,SAAO,CAAC,YAAoB,aAAqB,iBAA8C;AAC7F,UAAM,mBAAmB,AAAG,0BAAS,eAAe,IAAI,IAAI,aAAa,CAAC,GAAG,GAAG,YAAY;AAC5F,UAAM,mBAAmB,AAAG,0BAAS,eAAe,aAAa,cAAc,CAAC,GAAG,GAAG,YAAY;AAClG,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,EAAE,WAAW,GAAG,mCAChB,EAAE,WAAW,GAAG,mCAChB,EAAE,WAAW,GAAG;AAGlB,WAAO,IAAI,oBACT,kBACA,kBAC
A;AAAA;AAAA;AAKC,wCAEL,oBACA;AACA,SAAO,CAAC,WAAwC;AAC9C,UAAM,mBAAmB,mBAAgC,GAAG,2BAA2B;AACvF,UAAM,mBAAmB,mBAAgC,GAAG,2BAA2B;AACvF,UAAM,OAAO,mBAAgC,GAAG,eAAe;AAE/D,WAAO,IAAI,oBACT,kBACA,kBACA;AAAA;AAAA;;;ACpCC,mCAAmC,WAAgB,eAA+B;AACvF,SAAO,CAAC,cAAsB,WAAmB,eAAwB;AACvE,UAAM,UAAS,UAAU;AAEzB,QAAI,CAAC,SAAS,SAAQ,YAAY;AAChC,YAAM,IAAI,MAAM,sBAAsB,+BAA+B,4BAA4B;AAAA;AAGnG,kBAAc,KACZ,EAAE,cAAc,WAAW,cAAc;AAG3C,WAAO;AAAA;AAAA;;;ACfJ,+BAA+B,SAAuB;AAC3D,MAAI,mBAAmB;AAEvB,0BAAwB,YAAkC;AACxD,UAAM,MAAM,iBAAiB,MAAM,GAAG;AACtC,uBAAmB,iBAAiB,MAAM;AAC1C,WAAO;AAAA;AAGT,iCAA6C;AAC3C,WAAO;AAAA;AAGT,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;ACZG,2BAA2B,gBAAwC,eAA+B;AACvG,QAAM,oBAAoB,yBAAyB,gBAAgB;AACnE,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,oCAAkC,YAAoB,aAAqB,cAAsB,eAAwB,OAA0B;AACjJ,UAAM,QAAQ,eACV,kBAAkB,YAAY,aAAa,GAAG,GAAG,wBACjD,2BAA2B,YAAY,aAAa,GAAG;AAC3D,UAAM,QAAQ,2BAA2B,aAAa,aAAa,GAAG;AACtE,UAAM,SAAQ,2BAA2B,aAAa,aAAa,GAAG;AAEtE,WAAO,EAAE,OAAO,OAAO;AAAA;AAGzB,oCAAkC,YAAoB,aAAqB,cAAsB,eAAwB,OAA0B;AACjJ,UAAM,EAAE,OAAO,OAAO,kBAAU,yBAAyB,YAAY,aAAa,cAAc;AAChG,UAAM,QAAQ,2BAA2B,aAAa,aAAa,GAAG;AAEtE,WAAO;AAAA,MACL;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAIzB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;ACxBG,uBAAuB,SAA8F;AAC1H,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,gBAAgB;AAEtC,QAAM,SAAS,yBAAyB,GAAG,IAAI,UAAU;AACzD,QAAM,SAAS,yBAAyB,IAAI,IAAI;AAChD,QAAM,SAAS,yBAAyB,IAAI,KAAK;AACjD,QAAM,SAAS,yBAAyB,KAAK,KAAK;AAElD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ;AAAA,MACN;AAAA,MAAQ;AAAA,MAAQ;AAAA,MAAQ;AAAA;AAAA;AAAA;;;ACvBvB,+BAA+B,oBAAuE;AAC3G,SAAO,CAAC,WAA+B;AACrC,UAAM,UAAU,mBAAgC,GAAG,kBAAkB;AACrE,UAAM,OAAO,mBAAgC,GAAG,eAAe;AAE/D,WAAO,EAAE,SAAS;AAAA;AAAA;;;ACNf,2BAA2B,WAAgB,eAA+B;AAC/E,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,QAAM,oBAAoB,sBAAsB;AAChD,QAAM,6BAA6B,+BAA+B;AAElE,oCAAkC,QAAgB,eAAwB,OAA0B;AAClG,UAAM,QAAQ,eACV,kBAAkB,GAAG,kBACrB,2BAA2B,GAAG;AAClC,UAAM,QAAQ,2BAA2B,GAAG;AAC5C,UAAM,SAAQ,2BAA2B,GAAG;AAE5C,WAAO,EAAE,OAAO,OAAO;AAAA;AAGzB,oCAAkC,QAAgB,eAAwB,OAA0B;AAClG,UAAM,QAAQ,eACV,kBAAkB,GAAG,kBACrB,2BAA2B,GAAG;AAClC,UAAM,QAAQ,2BAA2B,GAAG;AAC5C,UAAM,SAAQ,2BAA2B,GAAG;AAC5C,UAAM,QAAQ,2BAA2B,GAAG;AAE5C,WAAO;AAAA,MACL;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAIzB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;;;AC7BG,oCACL,WACuE;AACvE,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,WAAW;AAEjC,QAAM,SAAS;AAAA,IACb,QAAQ,yBAAyB,UAAU;AAAA,IAC3C,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA;AAGnC,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ;AAAA;;;ACdZ,yCAAmC,cAAuG;AAAA,EAC/I,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,YAAY,YAAY,OAAO,QAAQ;AACjD,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AAEtC,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,2BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,cAAc;AAAA;AAAA;;;AC9ClB,6BACL,GACA,QACa;AACb,SAAO,AAAG,sBAAK,MAAM,AAAG,qBACtB,AAAG,wBAAO,GAAG,OAAO,UACpB,OAAO;AAAA;;;ACPJ,wBAAuB,SAAuB,YAAoB,aAA2E;AAClJ,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,kBAAkB,uBAAuB,gBAAgB;AAE/D,QAAM,KAAK,gBAAgB,YAAY,aAAa;AAEpD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ
,EAAE;AAAA;AAAA;;;AChBP,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,2BAAyB,QAA0B;AACjD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,EAAE,SAAS;AAAA;AAGpB,QAAM,SAAS;AAAA,IACb,IAAI,gBAAgB;AAAA;AAGtB,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ;AAAA;;;ACtBZ,4BAA4B,WAA8B;AAC/D,QAAM,sBAAyC;AAC/C,QAAM,gBAAmC;AAEzC,SAAO,KAAK,WAAW,QAAQ,CAAC,QAAQ;AACtC,UAAM,MAAM,IAAI,WAAW,QAAQ,gBAAgB;AACnD,QAAI,OAAO,UAAU;AAAA;AAGvB,SAAO,EAAE,qBAAqB;AAAA;;;ACAzB,kCAGG,cAAyB;AAAA,EAGjC,YAAY,OAAe,sBAA+D;AACxF,UAAM;AACN,SAAK,wBAAwB;AAAA;AAAA,MAGpB,uBAAgE;AACzE,WAAO,KAAK;AAAA;AAAA,EASP,OAAO,OAA4C;AACxD,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,qBAAqB,iBAAiB,WACxC,KAAK,qBAAqB,aAAa,SACvC;AACJ,aAAO,oBAAoB,mBAAmB,KAAK,mBAAmB,MAAM,IAAI,KAAK,OAAO;AAAA;AAAA;AAAA,EAIzF,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,qBAAqB,QAAQ;AAClC,UAAM,QAAQ;AAAA;AAAA,EAGT,qBAAqB,SAAuB;AACjD,UAAM,EAAE,QAAQ,kBAAkB,KAAK,wBAAwB;AAC/D,SAAK,UAAU;AACf,SAAK,iBAAiB;AAAA;AAAA,EAGjB,wBAAwB,SAAuB;AACpD,WAAO,eAAc,SAAS,KAAK,2BAA2B,KAAK;AAAA;AAAA,EAG3D,2BAA2B,WAA8B;AACjE,UAAM,EAAE,qBAAqB,kBAAkB,mBAAmB;AAElE,SAAK,qBAAqB,kBAAkB;AAE5C,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,UAAM,MAAM,KAAK;AACjB,UAAM,OAAO,KAAK;AAClB,UAAM,uBAAwB,OAAO,MAAO;AAE5C,UAAM,0BAA0B,QAAQ,MAAM,GAAG,QAAQ,SAAS;AAClE,UAAM,oBAAoB,QAAQ,MAAM,QAAQ,SAAS;AAEzD,SAAK,qBAAqB,eAAe;AACzC,WAAO,KAAK,wBAAwB;AAAA;AAAA;;;AC/EjC,IAAM,yBAAyB,CAAC,WAAW,SAAS,OAAO,SAAS,WAAW,aAAa;AAE5F,4BAAsB;AAAA,EAe3B,YAAY,eAAwC;AAClD,QAAI,cAAc,WAAW,GAAG;AAC9B,YAAM,IAAI,MAAM,8EAA8E,cAAc;AAAA;AAG9G,2BAAuB,QAAQ,CAAC,YAAY,QAAQ;AAClD,WAAK,cAAc,cAAc;AAAA;AAAA;AAAA,EAIrC,gBAAgB;AACd,WAAO,uBACJ,IAAI,CAAC,eAAgB,GAAE,YAAY,aAAa,KAAK,gBACrD,KAAK,CAAC,IAAI,OAAO,GAAG,cAAc,GAAG;AAAA;AAAA;;;ACtBrC,sCAAgC,cAA0C;AAAA,EAC/E,YAAY,uBAA6C,IAAI,wBAAwB;AACnF,UAAM,qBAAqB;AAAA;AAAA,EAGtB,aAAa,OAA4C;AAC9D,WAAO,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,OAAO;AAAA;AAAA,QAGjC,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,mBAAmB,OAAkB;AAChD,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa;AACpC,UAAM,sBAAsB,MAAM,QAAQ,IAAI,AAAG,yBAAQ,KAAK,IAAI,OAAO,MAAM;AAC7E,YAAM,OAAO,EAAE;AACf,QAAE;AACF,aAAO;AAAA;AAET,QAAI;AAEJ,UAAM,qBAAqB,oBACxB,IAAI,CAAC,iBAAiB,IAAI,gBAAgB;AAE7C,WAAO,SAAS,eACZ,qBACA,mBAAmB;AAAA;AAAA,EAGf,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA,EAGC,2BAAmC;AAC3C,WAAO;AAAA;AAAA;;;AC5CJ,+BAA+B,KAA0C;AAC9E,SAAO,IAAI,uBAAuB;AAAA;AAG7B,mCAA4C,WAAoB,aAA4D;AACjI,QAAM,YAAY,EAAE;AACpB,SAAO,KAAK,cAAc;AAAA;;;ACDrB,6BACL,WACA,iBACA,gBAAgB,KAChB,iBACA;AACA,QAAM,uBAAuB,MAAM,QAAQ,mBAAmB,kBAAkB,CAAC;AAEjF,uBAAqB,QAAQ,CAAC,MAAM;AAElC,UAAM,OAAO,aAAa,kBACtB,IACC,sBAAsB,KAAK,EAAE,cAAc;AAChD,QAAI,CAAC,MAAM;AACT,YAAM,IAAI,MAAM;AAAA;AAGlB,UAAM,SAAS,KAAK;AACpB,UAAM,mBAAmB,OAAO,OAAO,CAAC,cAAc,UAAU,cAAc;AAE9E,UAAM,SAAS,oBAAoB,KAC/B,EAAE,UAAU,IAAI,aACf,mBAAmB,IAAI,MAAM,GAAG;AAErC,UAAM,gBAAgB,IAAI,cACxB,iBAAiB,IAAI,CAAC,cAAc,GAAG,UAAU,eAAe,MAAM,UAAU,kBAChF;AAEF,kBAAc,KAAK;AAAA;AAAA;;;ACvBhB,6BAA6B,KAA0E;AAC5G,SAAO,oBAAoB,QAEtB,IAAI,wBAAwB,iBAE5B,IAAI,iCAAiC,iBAErC,IAAI,0BAA0B;AAAA;AAGrC,4BAA4B,MAAM;AAEhC,QAAM,UAAU,CAAC,IAAI,IAAI,IAAI,OAAQ,KAAK,MAAM,KAAK,IAAI,KAAK,MAAM,KAAK;AAGzE,QAAM,UAAU,CAAC,UAAW,QAAQ,MAAO,KAAK;AAEhD,QAAM,QAAQ,EAAE,MAA0B,QAAW,OAA2B,QAAW,KAAyB;AAEpH,MAAI,CAAC,QAAQ,CAAC,KAAK,cAAc,KAAK,WAAW,WAAW;AAAI,WAAO;AACvE,QAAM,KAAK,KAAK;AAOhB,QAAM,OAAO,CAAC,QAAQ,GAAG,IAAI,IAAI,GAAG,IAAI,IAAI,GAAG,IAAI,IAAI,GAAG,IAAI;AAK9D,QAAM,QAAQ,QAAQ,GAAG,KAAK,IAAI,GAAG,GAAG,KAAK,GAAG,IAAI,MAAM,GAAG,IAAI,IAAI,KAAK,IAAI,KAAK,IAAI,GAAG,IAAI,KAAK,GAAG,IAAI,MAAM,GAAG,IAAI;AAMvH,QAAM,SAA
S,GAAG,OAAO,CAAC,MAAM,QAAS,OAAO,IAAI,KAAK,OAAO,IAAI,IAAK;AACzE,QAAM,MAAM,GAAG,OAAO,CAAC,MAAM,QAAS,OAAO,IAAI,KAAK,OAAO,IAAI,IAAK;AACtE,QAAM,MAAM,KAAK,KAAM,MAAK,SAAS,UAAW,OAAM,UAAU,MAAO;AAEvE,SAAO;AAAA;AAGF,iCAEoD,WAAoB,oBAAgF;AAC7J,QAAM,EAAE,KAAK,UAAU,UAAU;AACjC,QAAM,YAAY,mBAAmB,QAAwB,MAAM,GAAG,MAAM;AAE5E,QAAM,OAAO,UAAU;AACvB,QAAM,EAAE,cAAc,UAAU;AAChC,QAAM,cAAc,IAAI,cAAc,UAAU,UAAU,OAAO,KAAK,QAAQ,UAAU,YAAY;AACpG,QAAM,QAAQ,mBAAmB;AAEjC,QAAM,YAAY;AAAA,IAChB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,SAAO,KAAK,cAAc;AAAA;;;AC3DrB,qCAA+B;AAAA,EAapC,YAAY,UAAqC,IAAI;AACnD,UAAM;AAAA,MACJ,YAAY;AAAA,MAAM,aAAa;AAAA,MAAM;AAAA,MAAW;AAAA,MAAW;AAAA,MAAW;AAAA,QACpE;AACJ,SAAK,YAAY;AACjB,SAAK,aAAa;AAClB,SAAK,YAAY,aAAa;AAC9B,SAAK,YAAY,aAAa;AAC9B,SAAK,YAAY,aAAa;AAC9B,SAAK,aAAa,cAAc;AAAA;AAAA;AAI7B,8BAAwB;AAAA,EAK7B,YACE,eACA,UAAqC,IACrC;AACA,SAAK,gBAAgB;AACrB,SAAK,UAAU,IAAI,yBAAyB;AAAA;AAAA,EAG9C,KAAK,WAAkE;AACrE,UAAM,MAAM,oBAAoB;AAEhC,UAAM;AAAA,MACJ;AAAA,MAAW;AAAA,MAAY;AAAA,MAAW;AAAA,MAAW;AAAA,MAAW;AAAA,QACtD,KAAK;AAET,QAAI,aAAa,KAAK,yBAAyB,iBAAiB;AAC9D,UAAI,cAAc;AAClB,UAAI,YAAY;AAChB,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc;AACpC,kBAAY,KAAK,KAAK,cAAc,cAAc;AAClD,kBAAY,KAAK,KAAK,cAAc,eAAe;AACnD,kBAAY,KAAK,KAAK,cAAc,YAAY;AAAA;AAGlD,QAAI,YAAY;AACd,UAAI,cAAc;AAClB,UAAI,YAAY;AAEhB,YAAM,YAAY,CAAC,OAAe;AAChC,YAAI;AACJ,YAAI,IAAI,GAAG,GAAG,GAAG,GAAG,WAAW,GAAG,IAAI,KAAK;AAC3C,YAAI;AAAA;AAEN,WAAK,cAAc,UAAU,QAAQ;AAAA;AAAA;AAAA;AAOpC,2BACL,WACA,eACA;AACA,QAAM,qBAAqB,MAAM,QAAQ,iBAAiB,gBAAgB,CAAC;AAC3E,qBAAmB,QAAQ,CAAC,MAAM;AAEhC,UAAM,YAAY,aAAa,gBAC3B,IACC,oBAAoB,KAAK,EAAE,YAAY;AAC5C,QAAI,CAAC,WAAW;AACd,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,kBAAkB,WAAW,KAAK;AAAA;AAAA;;;;;;ACrG1C,4BAA2B,gBAAwC,eAA+B;AAChG,QAAM,oBAAoB,yBAAyB,gBAAgB;AACnE,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,uCAAqC,YAAoB,aAAqB,cAA4C;AACxH,UAAM,kBAAkB,2BAA2B,YAAY,aAAa,GAAG;AAC/E,UAAM,kBAAkB,2BAA2B,aAAa,aAAa,GAAG;AAChF,UAAM,iBAAiB,kBAAkB,YAAY,aAAa,GAAG,GAAG;AAExE,WAAO,EAAE,iBAAiB,iBAAiB;AAAA;AAG7C,kCAAgC,UAAkB,cAAuC;AACvF,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAC1E,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAC1E,UAAM,kBAAkB,2BAA2B,UAAU,UAAU,GAAG;AAE1E,WAAO,EAAE,iBAAiB,iBAAiB;AAAA;AAG7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAAuB,eAAsF;AACzI,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AAEtC,QAAM,qBAAqB,kBAAkB,GAAG,IAAI,GAAG;AACvD,QAAM,+BAA+B,4BAA4B,IAAI,IAAI;AACzE,QAAM,+BAA+B,4BAA4B,IAAI,KAAK;AAE1E,QAAM,aAAa;AAAA,IACjB,SAAS;AAAA,IACT,mBAAmB;AAAA,IACnB,mBAAmB;AAAA;AAGrB,QAAM,cAAc;AACpB,QAAM,eAAe,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAC1C,gBAAY,cAAc,SAAS,uBAAuB,KAAK,0BAA0B;AAAA;AAG3F,QAAM,4BAA4B,4BAA4B,KAAK,KAAK;AACxE,QAAM,2BAA2B,2BAA2B,KAAK,KAAK;AAEtE,QAAM,YAAY;AAAA,IAChB,iBAAiB;AAAA,IACjB,gBAAgB;AAAA;AAGlB,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,EAAE,YAAY,aAAa;AAAA;AAAA;;;ACtEvC,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,QAAM,oBAAoB,sBAAsB;AAChD,QAAM,6BAA6B,+BAA+B;AAElE,uCAAqC,cAA4C;AAC/E,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,iBAAiB,kBAAkB,GAAG;AAE5C,WAAO,EAAE,iBAAiB,iBAAiB;AAAA;AAG7C,kCAAgC,cAAuC;AACrE,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AACtD,UAAM,kBAAkB,2BAA2B,GAAG;AAEtD,WAAO,EAAE,iBAAiB,iBAAiB;AAAA;AAG7C,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACA,eAC+D;AAC/D,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,QAAM,qBAAqB,kBAAkB;AAC7C,QAAM,+BAA
+B,4BAA4B;AACjE,QAAM,+BAA+B,4BAA4B;AAEjE,QAAM,aAAa;AAAA,IACjB,SAAS;AAAA,IACT,mBAAmB;AAAA,IACnB,mBAAmB;AAAA;AAGrB,QAAM,cAAc;AACpB,QAAM,eAAe,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAC1C,gBAAY,cAAc,SAAS,uBAAuB,0BAA0B;AAAA;AAGtF,QAAM,4BAA4B,4BAA4B;AAC9D,QAAM,2BAA2B,2BAA2B;AAE5D,QAAM,YAAY;AAAA,IAChB,iBAAiB;AAAA,IACjB,gBAAgB;AAAA;AAGlB,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ,EAAE,YAAY,aAAa,aAAa;AAAA;;;AChE3D,cAAc,GAAgB,QAAoB,QAAuC;AACvF,SAAO,AAAG,qBAAI,AAAG,wBAAO,GAAG,OAAO,SAAS,QAAQ,SAAS,OAAO;AAAA;AAGrE,wBAAwB,GAAgB,QAA8B,kBAA2B,MAAmB;AAClH,MAAI,MAAM,kBAAkB,AAAG,sBAAK,KAAK;AACzC,QAAM,uBAAuB,KAAK,OAAO,iBAAiB,CAAC,GAAG;AAC9D,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,QAAM,AAAG,qBAAI,KAAK,KAAK,GAAG,OAAO,gBAAgB,CAAC,GAAG;AACrD,SAAO;AAAA;AAGT,mBAAmB,GAAgB,QAAsC;AACvE,MAAI,MAAM,uBAAuB,AAAG,sBAAK,IAAI,OAAO,iBAAiB,CAAC,GAAG;AACzE,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,uBAAuB,AAAG,sBAAK,MAAM,OAAO,iBAAiB,CAAC,GAAG;AACvE,QAAM,AAAG,qBAAI,KAAK;AAClB,SAAO;AAAA;AAGF,iCAA2B,cAAkC;AAAA,EAGlE,YAAY,eAAuB;AACjC,UAAM;AACN,SAAK,iBAAiB;AAAA;AAAA,EAGjB,aAAa,OAA8B;AAChD,UAAM,EAAE,WAAW;AACnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAElB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AACvD,UAAI,MAAM,AAAG,sBAAK,KAAK,YAAY,OAAO,WAAW,SAAS,CAAC,GAAG;AAClE,YAAM,eAAe,KAAK,OAAO,WAAW,mBAAmB;AAC/D,YAAM,eAAe,KAAK,OAAO,WAAW;AAC5C,YAAM,KAAK,gBAAgB,GAAG,GAAG,QAAQ,CAAC,QAAQ;AAChD,cAAM,UAAU,KAAK,OAAO,YAAY,cAAc;AAAA;AAExD,YAAM,eAAe,KAAK,OAAO,UAAU;AAC3C,YAAM,AAAG,sBAAK,uBAAuB,KAAK,OAAO,UAAU,gBAAgB,CAAC,GAAG;AAC/E,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B,WAAW,KAAK;AAAA;AAAA,EAG1C,cAAc,SAAuB;AAC7C,WAAO,eAAc,SAAS,KAAK;AAAA;AAAA;;;ACvEhC,wBAAuB,SAA6E;AACzG,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,kBAAkB,uBAAuB,gBAAgB;AAE/D,QAAM,MAAM,gBAAgB,KAAK,GAAG;AACpC,QAAM,SAAS,gBAAgB,KAAK,GAAG;AAEvC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,EAAE,IAAI,EAAE,KAAK;AAAA;AAAA;;;ACjBlB,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,2BAAyB,QAA0B;AACjD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,EAAE,SAAS;AAAA;AAGpB,QAAM,SAAS;AAAA,IACb,IAAI;AAAA,MACF,KAAK,gBAAgB;AAAA,MACrB,QAAQ,gBAAgB;AAAA;AAAA;AAI5B,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ;AAAA;;;ACtBZ,IAAK;AAAL,UAAK,SAAL;AAEL,sBAAS;AAET,oBAAO;AAAA,GAJG;;;ACML,iCAA2B,cAAyB;AAAA,EAGzD,YAAY,uBAAqC,IAAI,aAAa,IAAI;AACpE,UAAM;AACN,SAAK,wBAAwB;AAAA;AAAA,MAGpB,uBAAqC;AAC9C,WAAO,KAAK;AAAA;AAAA,EAGP,OAAO,OAA0C;AACtD,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,qBAAqB,iBAAiB,WACxC,KAAK,qBAAqB,aAAa,SACvC;AAEJ,YAAM,SAAS,AAAG,yBAAQ,oBAAoB,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,SAAS,KAAK,mBAAmB,MAAM,IAAI;AACzG,YAAM,MAAM,oBAAoB,QAAQ,OAAO,GAAG,KAAK;AACvD,YAAM,SAAS,oBAAoB,QAAQ,OAAO,GAAG;AACrD,aAAO,EAAE,KAAK;AAAA;AAAA;AAAA,EAIX,aAAa,OAA0C;AAC5D,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,EAAE,KAAK,WAAW,KAAK,OAAO;AACpC,aAAO,EAAE,KAAK,QAAQ,AAAG,yBAAQ;AAAA;AAAA;AAAA,QAIxB,QAAQ,OAAsC;AACzD,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,oBAAoB,OAA8E;AAC7G,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa;AAEpC,UAAM,OAAO,AAAG,yBAAQ,IAAI;AAC5B,UAAM,UAAU,AAAG,yBAAQ,IAAI;AAC/B,UAAM,sBAAsB,KAAK,IAAI,CAAC,WAAW,MAAO;AAAA,MACtD;AAAA,MACA,cAAc,QAAQ;AAAA;AAGxB,UAAM,qBAAqB,MAAM,QAAQ,IACvC,oBAAoB,IAAI,OAAO,EAAE,WAAW,mBAAmB;AAC7
D,YAAM,MAAO,UAAU,WAAY;AACnC,YAAM,WAAY,aAAa,WAAY;AAC3C,YAAM,SAAS,WAAW;AAC1B,YAAM,SAAS,SAAS,OAAO,OAAO,OAAO;AAC7C,YAAM,oBAAoB,SAAS,WAAY,IAAI;AAEnD,gBAAU;AACV,mBAAa;AACb,aAAO,EAAE,KAAK,QAAQ;AAAA;AAG1B,QAAI,IAAI;AACR,QAAI,OAAO;AAEX,WAAO,SAAS,eAAe,qBAAiD,mBAAmB;AAAA;AAAA,EAG3F,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGF,QAAQ,mBAA4B,MAAM;AAC/C,SAAK,qBAAqB,QAAQ;AAClC,UAAM,QAAQ;AAAA;AAAA,EAGT,qBAAqB,SAAuB;AACjD,UAAM,EAAE,QAAQ,kBAAkB,KAAK,wBAAwB;AAC/D,SAAK,UAAU;AACf,SAAK,iBAAiB;AAAA;AAAA,EAGjB,wBAAwB,SAAuB;AACpD,WAAO,eAAc;AAAA;AAAA,EAGb,2BAA2B,WAA8B;AACjE,UAAM,EAAE,qBAAqB,kBAAkB,mBAAmB;AAElE,SAAK,qBAAqB,kBAAkB;AAE5C,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,UAAM,uBAAwB,MAAM,IAAI,IAAM,OAAM,IAAI;AAExD,UAAM,0BAA0B,QAAQ,MAAM,GAAG,QAAQ,SAAS;AAClE,UAAM,oBAAoB,QAAQ,MAAM,QAAQ,SAAS;AAEzD,SAAK,qBAAqB,eAAe;AACzC,WAAO,KAAK,wBAAwB;AAAA;AAAA;;;AC5GjC,0CAGG,cAAgC;AAAA,EACjC,YAAY,QAAqB,WAAmB,oBAAgD;AACzG,UAAM,kBAAkB,mBAAmB,IAAI,CAAC,EAAE,OAAO,aAAa;AACpE,YAAM,SAAQ,YAAY,KAAK,IAAI,QAAQ;AAC3C,aAAO;AAAA,QACL,OAAO,QAAQ;AAAA,QACf,QAAQ,SAAS;AAAA;AAAA;AAIrB,UAAM,YAAY,gBAAgB;AAElC,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,0BAA0B,CAAC,OAAe,UAAkB,AAAG,uBAAM,CAAC,AAAG,sBAAK,CAAC,KAAK,OAAO,YAAY,AAAG,sBAAK,CAAC,KAAK,OAAO,aAAa,GAAG,KAAK,GAAG,KAAK;AAG/J,YAAM,aAAa,CAAC,UAAkB,SAAoD;AACxF,cAAM,EAAE,OAAO,WAAW,gBAAgB;AAC1C,eAAO,KAAK,OAAO,UAAU,KAAK,IAAI,QAAQ,UAAU,IAAI;AAAA;AAG9D,YAAM,cAAc,CAAC,aAAqB,WAAW,UAAU,CAAC,GAAG,MAAM,IAAI;AAC7E,YAAM,cAAc,CAAC,aAAqB,WAAW,UAAU,CAAC,GAAG,MAAM,IAAI;AAE7E,YAAM,kBAAkB,OACrB,IAAI,AAAG,sBAAK,CAAC,WAAW,MAAM,WAAW,YACzC,IAAI,AAAG,uBAAM,MAAM,KAAK,MAAM,YAAY,CAAC,GAAG,aAAa,wBAC1D,YAAY,WACZ,YAAY,cAEb,IAAI,AAAG,uBAAM,MAAM,KAAK,MAAM,YAAY,CAAC,GAAG,aAAa,wBAC1D,gBAAgB,UAAU,OAC1B,gBAAgB,UAAU;AAG9B,aAAO;AAAA;AAAA;AAAA,EAIJ,aAAa,OAA8B;AAChD,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,MAAM,KAAK,OAAO;AACxB,aAAO,KAAK,YACV,KACA,MAAM,WACN,MAAM,gBAAgB,IAAI,CAAC,CAAC,QAAQ,WAAY,GAAE,QAAQ;AAAA;AAAA;AAAA,QAKnD,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,gBAAgB,OAAgE;AAC3F,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,kBAAkB,AAAG,sBACzB,MAAM,AAAG,yBAAQ,KAAK,aAAa;AAGrC,UAAM,oBAAoB,MAAM,QAAQ,IAAI,gBAAgB,IAC1D,OAAO,gBAAgB,aAAa;AAClC,YAAM,iBAAiB,MAAM,KAAK,eAAe;AACjD,YAAM,UAAU,eAAe,OAAO,CAAC,GAAG,MAAM,OAAO;AACvD,YAAM,UAAU,eAAe,OAAO,CAAC,GAAG,MAAM,CAAC,OAAO;AAExD,aAAO,IAAI,gBACT,MAAM,IAAI,KAAK,GAAG,IAAI,CAAC,GAAG,MAAM,IAAI,MAAM,QAAQ,IAAc,QAAQ,MACxE;AAAA,QACE,QAAQ,SAAS,eAAe;AAAA,QAChC,OAAO,SAAS,cAAc;AAAA;AAAA;AAMtC,oBAAgB,QAAQ,CAAC,MAAM,EAAE;AAEjC,WAAO,SAAS,eAAe,oBAAyC,kBAAkB;AAAA;AAAA,EAGlF,2BAAmC;AAC3C,WAAO;AAAA;AAAA;;;AC1FJ,sCAAgC,sBAAkD;AAAA,EACvF,YAAY,uBAA6C,IAAI,wBAAwB;AACnF,UAAM,qBAAqB;AAAA;AAAA,EAGnB,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA;;;ACRJ,wCACL,WAC2E;AAC3E,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,WAAW;AAEjC,QAAM,SAAS;AAAA,IACb,QAAQ,yBAAyB,UAAU;AAAA,IAC3C,QAAQ,yBAAyB;AAAA,IACjC,QAAQ,yBAAyB;AAAA;AAGnC,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ;AAAA;;;ACnBZ,2BAA2B,SAAkG;AAClI,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM;AAAA,IACJ;AAAA,MACE,kBAAkB,gBAAgB;AAEtC,QAAM,SAAS,yBAAyB,GAAG,IAAI,UAAU;AACzD,QAAM,SAAS,yBAAyB,IAAI,IAAI;AAChD,QAAM,SAAS,yBAAyB,IAAI,KAAK;AAEjD,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL;AAAA,IACA,QAAQ,EAAE,QAAQ,QAAQ;AAAA;AAAA;;;AChBvB,6CAAuC,cAA+G;AAAA,EAC3J,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAC5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,YAAY,YAAY,OAAO,QAAQ;AACjD,YAAM,YAAY,
KAAK,OAAO;AAC9B,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,AAAG,yBAAQ,KAAK,CAAC,IAAI,KAAK,CAAC,GAAG,IAAI;AAExC,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,EAGlC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,+BAA+B;AAAA;AAAA,EAG9B,cAAc,SAAuB;AAC7C,WAAO,kBAAkB;AAAA;AAAA;;;AC7CtB,0CAAoC,sBAAsD;AAAA,EAC/F,YAAY,uBAAiD,IAAI,4BAA4B;AAC3F,UAAM,yBAAyB;AAAA;AAAA,EAGvB,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,0BAAkC;AAC1C,WAAO;AAAA;AAAA;;;ACVJ,oCAA8B,kBAAkB;AAAA;;;ACAhD,eAAe,GAAgB,QAAuC;AAC3E,SAAO,AAAG,qBAAI,AAAG,qBAAI,GAAG,OAAO,UAAU,OAAO;AAAA;;;ACAlD,oBACE,GACA,QACA,SACA,UACA,UAA4B,QACf;AACb,QAAM,EAAE,SAAS,SAAS,OAAO;AAEjC,MAAI,MAAM,AAAG,wBAAO,GAAG,SAAS,SAAS;AACzC,QAAM,AAAG,qBAAI,KAAK;AAClB,QAAM,MAAM,KAAK,OAAO;AACxB,SAAO,WAAW,AAAG,sBAAK,OAAO;AAAA;AAG5B,eAAc,GAAgB,QAAyB;AAC5D,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI;AAAA;AAG/B,oBAAoB,GAAgB,QAAyB;AAClE,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI;AAAA;AAG/B,kBAAkB,GAAgB,QAAyB;AAChE,SAAO,WAAU,GAAG,QAAQ,CAAC,GAAG,IAAI,MAAM;AAAA;;;ACvB5C,4BAA2B,gBAAwC,eAA+B;AAChG,+BAA6B,iBAAyB,YAAoB,YAAiC;AACzG,UAAM,UAAU,eAAe;AAC/B,UAAM,QAAQ,QAAQ,SAAU,cAAa,aAAa;AAE1D,QAAI,QAAQ,QAAQ;AAClB,YAAM,IAAI,MAAM,+BAA+B,0BAA0B,QAAQ,uBAAuB,2BAA2B;AAAA;AAGrI,WAAO,AAAG,sBACR,MAAM,AAAG,2BACP,AAAG,0BAAS,SAAS,CAAC,YAAY,OAAO,YAAY,cACrD,CAAC,GAAG,GAAG,GAAG;AAAA;AAKhB,6BACE,iBACA,YACA,YACA,cACY;AACZ,UAAM,UAAU,oBAAoB,iBAAiB,YAAY;AACjE,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG;AAGlB,WAAO,EAAE,SAAS;AAAA;AAGpB,mCAAiC,YAAoB,cAAwC;AAC3F,UAAM,UAAU,AAAG,0BAAS,eAAe;AAC3C,UAAM,SAAS,AAAG,0BAAS,eAAe;AAE1C,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAIJ,kCACE,iBACA,YACA,YACA,cACiB;AACjB,UAAM,QAAO,kBAAkB,iBAAiB,YAAY,YAAY,GAAG;AAC3E,UAAM,SAAQ,wBAAwB,YAAY,GAAG;AAErD,WAAO,EAAE,aAAM;AAAA;AAGjB,sCACE,iBACA,YACA,YACA,cACA,SAAkB,OACG;AACrB,UAAM,QAAQ,uBAAwB,UAAS,MAAM,KAAK,iBAAiB,YAAY,YAAY,GAAG;AACtG,UAAM,SAAQ,uBAAuB,iBAAiB,YAAY,YAAY,GAAG;AAEjF,WAAO,EAAE,OAAO;AAAA;AAGlB,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAA6E;AACzG,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AAEtC,QAAM,cAAc,uBAAuB,MAAM,IAAI,GAAG;AACxD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AACzD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AACzD,QAAM,WAAW,2BAA2B,MAAM,IAAI,GAAG;AAEzD,QAAM,cAAc,2BAA2B,OAAO,IAAI,GAAG,eAAe;AAC5E,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAC1D,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAC1D,QAAM,WAAW,2BAA2B,OAAO,IAAI,GAAG;AAE1D,QAAM,eAAe,2BAA2B,QAAQ,KAAK,GAAG,gBAAgB;AAChF,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAE7D,QAAM,eAAe,2BAA2B,QAAQ,KAAK,GAAG,gBAAgB;AAChF,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,YAAY,2BAA2B,QAAQ,KAAK,GAAG;AAC7D,QAAM,mBAAmB,2BAA2B,QAAQ,KAAK,GAAG;AAEpE,QAAM,KAAK,AAAG,sBACZ,MAAM,AAAG,2BAAU,AAAG,0BAAS,eAAe,MAAM,MAAM,CAAC,KAAK,OAAO,CAAC,GAAG;AAE7E,gBAAc,KAAK,EAAE,WAAW;AAEhC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,SAAO,EAAE,QAAQ;AAAA;;;AC5InB,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,mCAAiC,QAAkC;AACjE,UAAM,UAAU,mBAAmB,GAAG,wBAAwB;AAC9D,UAAM,SAAS,mBAAmB,GAAG,uBAAuB;AAE5D,WAAO,EAAE,SAAS;AAAA;AAGpB,kCAAgC,QAAiC;AAC/D,UAAM,UAAU,mBAAmB,GAAG,uBAAuB;AAC7D,UAAM,OAAO,mBAAmB,GAAG,oBAAoB;AACvD,UAAM,SAAQ,wBAAwB;AAEtC,WAAO,EAAE,MAAM,EAAE,SAAS,QAAQ;AAAA;AAGpC,sCAAoC,QAAqC;AACvE,WAAO;AAAA,MACL,OAAO,uBAAuB,GAAG;AAAA,MACjC,OAAO,uBAAuB,GAAG;AAAA;AAAA;AAIr
C,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACsD;AACtD,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,QAAM,cAAc,uBAAuB;AAC3C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAE5C,QAAM,cAAc,2BAA2B;AAC/C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAC5C,QAAM,WAAW,2BAA2B;AAE5C,QAAM,eAAe,2BAA2B;AAChD,QAAM,YAAY,2BAA2B;AAC7C,QAAM,YAAY,2BAA2B;AAE7C,QAAM,eAAe,2BAA2B;AAChD,QAAM,YAAY,2BAA2B;AAC7C,QAAM,YAAY,2BAA2B;AAC7C,QAAM,mBAAmB,2BAA2B;AAEpD,QAAM,EAAE,OAAO;AACf,gBAAc,KAAK,EAAE,cAAc,MAAM,WAAW;AAEpD,MAAI,CAAC,WAAW,KAAK;AACnB,UAAM,IAAI,MAAM,yDAAyD;AAAA;AAG3E,QAAM,SAAS;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAGF,6BAA2B,WAAW;AAEtC,SAAO,EAAE,QAAQ;AAAA;;;ACzFZ,kBAAkB,GAAgB,QAA0C;AACjF,MAAI,MAAM,MAAK,GAAG,OAAO;AACzB,QAAM,WAAW,KAAK,OAAO;AAC7B,QAAM,AAAG,qBAAI,KAAK;AAClB,QAAM,AAAG,sBAAK;AACd,SAAO;AAAA;AAGF,sBAAsB,GAAgB,QAA0C;AACrF,MAAI,MAAM,SAAS,GAAG,OAAO;AAC7B,QAAM,WAAW,KAAK,OAAO;AAE7B,MAAI,SAAS,AAAG,yBAAQ,GAAG,GAAG,GAAG;AACjC,QAAM,SAAQ,AAAG,uBAAkB,OAAO;AAC1C,QAAM,QAAQ,OAAO,MAAM,OAAO,IAAI,MAAM;AAC5C,QAAM,gBAAgB,OAAO,MAAM,OAAO,IAAI,MAAM,MAAM,OAAO,MAAM,OAAO,IAAI,MAAM;AAExF,MAAI,eAAe;AACjB,UAAM,YAAY,CAAC,GAAG,IAAI;AAC1B,cAAU,KAAK;AACf,UAAM,SAAS,AAAG,uBAAkB;AACpC,UAAM,AAAG,wBAAO,CAAC,KAAK,SAAS;AAE/B,UAAM,YAAY,CAAC,GAAG,IAAI;AAC1B,cAAU,KAAK;AACf,UAAM,SAAS,AAAG,uBAAkB;AACpC,UAAM,AAAG,wBAAO,CAAC,KAAK,SAAS;AAAA;AAGjC,WAAS,QAAQ,AAAG,wBAAO,CAAC,QAAQ,SAAQ,KAAK;AACjD,QAAM,AAAG,qBAAI,QAAQ;AAErB,QAAM,AAAG,sBAAK;AACd,SAAO;AAAA;;;AC3BF,uCAAiC,cAAyB;AAAA,EAC/D,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAA8B;AAChD,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,OAAO;AAE5D,YAAM,UAAU,CAAC,SAAS,SAAS;AACnC,YAAM,aAAa,UAAU,aAAa,SAAS,IAAI;AAEvD,UAAI,MAAM,SAAS,YAAY,OAAO;AACtC,YAAM,AAAG,yBAAQ,KAAK,GAAG,GAAG;AAE5B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAE3B,YAAM,aAAa,KAAK,OAAO;AAC/B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,SAAS,KAAK,OAAO;AAC3B,YAAM,aAAa,KAAK,OAAO;AAE/B,YAAM,YAAY,IAAI,KAAK,CAAC,GAAG;AAC/B,YAAM,iBAAiB,AAAG,wBAAO,WAAW,OAAO;AAEnD,aAAO;AAAA;AAAA;AAAA,QAIE,QAAQ,OAAwC;AAC3D,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,sBAAsB,OAAwD;AA7D7F;AA8DI,QAAI,qCAAO,UAAP,mBAAc,KAAK,CAAC,QAAQ,OAAO;AAAI,aAAO,IAAI,aAAa;AACnE,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,wBAAwB,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,aAAa;AACzE,UAAM,0BAA0B,MAAM,QAAQ,IAAI,sBAAsB,IAAI,CAAC,MAAM,EAAE;AACrF,0BAAsB,QAAQ,CAAC,MAAM,EAAE;AACvC,WAAO,SAAS,eAAe,0BAA0B,wBAAwB;AAAA;AAAA,EAGzE,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,eAAc;AAAA;AAAA;;;AC3ElB,kCAAkC,SAAuB;AAC9D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACHF,kCAGL,WACA,YAC6B;AAC7B,QAAM,YAAY,EAAE;AACpB,SAAO,KAAK,cAAc;AAAA;;;ACPrB,mBAAmB,KAA8B;AACtD,SAAO,OAAO,IAAI,QAAQ;AAAA;AAGrB,uBAGL,WACA,KACkB;AAClB,QAAM,YAAY,EAAE;AACpB,SAAO,KAAK,cAAc;AAAA;;;ACPrB,sBAAsB,KAAiC;AAC5D,SAAQ,KAAI,WAAW,OAAO,QAAQ,IAAI,WAAW,OAAO,WACvD,mBAAmB,IAAI;AAAA;AAGvB,0BAGL,WACA,QACA,mBACqB;AACrB,QAAM,YAAY,EAAE,QAAQ;AAC5B,SAAO,KAAK,cAAc;AAAA;;;AChB5B,4BAA2B,gBAAwC,eAA+B;AAChG,sCAAoC,aAAqB,cAAuD;AAC9G,UAAM,UAAU,AAAG,0BAAS,eAAe,IAAI,IAAI,cAAc,CAAC,GAAG,GAAG,aAAa;AACrF,UAAM,mBAAmB,AAAG,0BAAS,eAAe;AACpD,UAAM,oBAAoB,AAAG,0BAAS,eAAe;AACrD,UAAM,kBAAkB,AAAG,0BAAS,eAAe;AA
CnD,UAAM,sBAAsB,AAAG,0BAAS,eAAe;AAEvD,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG,mCAChB,EAAE,WAAW,GAAG,oCAChB,EAAE,WAAW,GAAG,kCAChB,EAAE,WAAW,GAAG;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,6BACE,YACA,aACA,YACA,cACA,iBACY;AACZ,UAAM,UAAU,AAAG,0BACjB,eAAe,aAAa,cAAc,aAAa,aACvD,CAAC,YAAY,YAAY,YAAY;AAEvC,UAAM,OAAO,AAAG,0BAAS,eAAe;AAExC,kBAAc,KACZ,EAAE,WAAW,GAAG,0BAChB,EAAE,WAAW,GAAG,gBAAgB,kBAAkB,sBAAsB;AAG1E,WAAO,EAAE,SAAS;AAAA;AAGpB,sCACE,YACA,aACA,YACA,cACqB;AACrB,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,QACE,kBAAkB,YAAY,aAAa,YAAY,cAAc;AAEzE,WAAO;AAAA,MACL;AAAA,MACA,mBAAmB;AAAA;AAAA;AAIvB,iCACE,YACA,aACA,cAC4B;AAC5B,UAAM,iBAAiB,2BAA2B,YAAY,GAAG;AACjE,UAAM,iBAAiB,2BAA2B,YAAY,aAAa,GAAG,GAAG;AAEjF,WAAO,EAAE,gBAAgB;AAAA;AAG3B,sCAAwD;AACtD,UAAM,SAAS,2BAA2B,GAAG,IAAI,GAAG;AACpD,UAAM,SAAS,sBAAsB,IAAI,IAAI;AAC7C,UAAM,SAAS,sBAAsB,IAAI,KAAK;AAC9C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,SAAS,sBAAsB,KAAK,KAAK;AAC/C,UAAM,UAAU,sBAAsB,KAAK,KAAK;AAChD,UAAM,UAAU,sBAAsB,KAAK,KAAK;AAChD,UAAM,UAAU,sBAAsB,KAAK,MAAM;AACjD,UAAM,UAAU,sBAAsB,MAAM,MAAM;AAClD,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,0CAA+D;AAC7D,UAAM,SAAS,2BAA2B,MAAM,KAAK,GAAG;AACxD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,KAAK,GAAG;AACvD,UAAM,SAAS,2BAA2B,KAAK,IAAI,GAAG;AACtD,UAAM,SAAS,2BAA2B,IAAI,KAAK,GAAG;AACtD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,GAAG,GAAG;AACvD,UAAM,2BAA2B,kBAAkB,MAAM,IAAI,GAAG;AAChE,UAAM,oBAAoB,kBAAkB,MAAM,IAAI,GAAG;AACzD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AACxD,UAAM,2BAA2B,kBAAkB,KAAK,IAAI,GAAG;AAC/D,UAAM,oBAAoB,kBAAkB,KAAK,IAAI,GAAG;AAExD,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,UAAM,kBAAkB;AAAA,MACtB,wBAAwB;AAAA,MACxB,iBAAiB;AAAA;AAEnB,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA;AAIJ,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,wBAAuB,SAA6E;AACzG,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAC1B,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AACtC,QAAM,cAAc;AACpB,QAAM,mBAAmB;AACzB,QAAM,YAAY,AAAG,0BACnB,eAAe,OAAO,IACtB,CAAC,GAAG,MAAM;AAEZ,QAAM,eAAe;AAAA,IACnB;AAAA;AAEF,gBAAc,KAAK,EAAE,WAAW;AAChC,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAG1E,SAAO;AAAA,IACL,QAAQ;AAAA,MACN;AAAA,MACA;AAAA,MACA;AAAA;AAAA,IAEF;AAAA;AAAA;;;AC9MJ,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,sCAAoC,QAAgB,KAAa,cAA2C;AAC1G,UAAM,UAAU,mBAAmB,GAAG,iBAAiB,yBAAyB,GAAG,GAAG;AACtF,UAAM,oBAAoB,mBAAmB,GAAG,iBAAiB,uCAAuC,GAAG,GAAG;AAC9G,WAAO,EAAE,SAAS;AAAA;AAGpB,iCAA+B,KAAyC;AACtE,UAAM,eAAe,oBAAoB;AACzC,UAAM,sBAAsB,sBAAsB;AAClD,UAAM,4BAA4B,GAAG;AACrC,UAAM,4BAA4B,GAAG;AAErC,UAAM,UAAU,mBAAmB,GAAG,yCAAyC,GAAG,GA
AG;AACrF,UAAM,mBAAmB,mBAAmB,GAAG,uCAAuC,GAAG,GAAG;AAC5F,UAAM,oBAAoB,mBAAmB,GAAG,sCAAsC,GAAG,GAAG;AAC5F,UAAM,kBAAkB,mBAAmB,GAAG,6CAA6C,GAAG,GAAG;AACjG,UAAM,sBAAsB,mBAAmB,GAAG,iDAAiD,GAAG,GAAG;AAEzG,WAAO;AAAA,MACL,gBAAgB;AAAA,QACd;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA;AAAA,MAEF,gBAAgB,2BAA2B,eAAe,KAAK;AAAA;AAAA;AAInE,sCAAwD;AACtD,WAAO;AAAA,MACL,QAAQ,2BAA2B,eAAe,GAAG;AAAA,MACrD,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,QAAQ,sBAAsB;AAAA,MAC9B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA,MAC/B,SAAS,sBAAsB;AAAA;AAAA;AAInC,6BAA2B,QAAgB,cAAkC;AAC3E,UAAM,UAAU,mBAAmB,GAAG,kBAAkB,GAAG,GAAG;AAC9D,UAAM,OAAO,mBAAmB,GAAG,iBAAiB,GAAG,GAAG;AAC1D,WAAO,EAAE,SAAS;AAAA;AAGpB,qCAAmC,KAAkC;AACnE,UAAM,yBAAyB,kBAC7B,2BAA2B,4BAC3B,kCAAkC;AAEpC,UAAM,kBAAkB,kBACtB,2BAA2B,sBAC3B,kCAAkC;AAEpC,WAAO,EAAE,wBAAwB;AAAA;AAGnC,0CAA+D;AAC7D,WAAO;AAAA,MACL,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,QAAQ,2BAA2B,cAAc,GAAG;AAAA,MACpD,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA,MAC3C,iBAAiB,0BAA0B;AAAA;AAAA;AAI/C,SAAO;AAAA,IACL;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACsD;AACtD,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AACjC,QAAM,YAAY,UAAU;AAC5B,gBAAc,KAAK,EAAE,cAAc,oBAAoB,WAAW;AAClE,MAAI,CAAC,WAAW,YAAY;AAC1B,UAAM,IAAI,MAAM,yEAAyE;AAAA;AAG3F,QAAM,SAAS;AAAA,IACb,aAAa;AAAA,IACb,kBAAkB;AAAA,IAClB,cAAc;AAAA,MACZ;AAAA;AAAA;AAIJ,6BAA2B,WAAW;AACtC,SAAO,EAAE,QAAQ;AAAA;;;ACxHZ,4BAA4B,GAAgB,QAA6B,SAA2B;AACzG,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,wBAAO,GAAG,OAAO,SAAS,SAAS;AAChD,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO,AAAG,6BAAY,KAAK,GAAG;AAAA;AAAA;;;ACHlC,IAAM,UAAU;AAEhB,4BAA4B,GAAgB,QAAyC,SAA2B;AAC9G,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,iCAAgB,GAAG,OAAO,SAAS,SAAS;AACzD,UAAM,AAAG,2BACP,KACA,OAAO,iBACP,OAAO,qBACP,OAAO,mBACP,OAAO,kBACP;AAEF,WAAO,AAAG,6BAAY,KAAK,GAAG;AAAA;AAAA;AAIlC,+BAA+B,UAAoC;AACjE,SAAO,CAAC,GAAG,GAAG,GAAG,IAAI,KAAK,CAAC,QAAQ,QAAQ,YAAY,CAAC,GAAG,KAAK,CAAC,GAAG;AAAA;AAG/D,qBAAqB,GAAgB,QAA4B;AACtE,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI;AACJ,QAAI,MAAM,mBAAmB,GAAG,OAAO,QAAQ,CAAC,GAAG;AAEnD,UAAM,iBAAiB;AAAA,MACrB,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA;AAGT,mBAAe,QAAQ,CAAC,OAAO,MAAM;AACnC,YAAM,WAAW,IAAI;AACrB,YAAM,uBAAuB,sBAAsB;AACnD,YAAM,mBAAmB,KAAK,MAAM,gBAAgB;AACpD,YAAM,mBAAmB,KAAK,MAAM,gBAAgB,CAAC,GAAG;AACxD,UAAI,aAAa;AAAI,iBAAS;AAAA;AAGhC,QAAI,WAAW,MAAM;AACnB,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAAA;;;AC3DN,aAAa,OAAoB,GAAW,GAAW;AACrD,QAAM,YAAY,MAAM;AACxB,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAQ,KAAK,IAAI,UAAU,GAAG,IAAI,UAAU,GAAG;AACrD,QAAM,QAAS,SAAQ,SAAU,SAAQ;AACzC,QAAM,QAAS,SAAQ,SAAU,SAAQ;AACzC,MAAI,SAAS,KAAK,SAAS;AAAG,WAAO;AACrC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM
,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,OAAO;AACzC,QAAM,mBAAmB,KAAK,IAAI,mBAAmB,kBAAkB,KAAO,KAAK,IAAI,mBAAmB,kBAAkB;AAC5H,SAAO,mBAAoB,SAAQ,QAAQ;AAAA;AAGtC,4BACL,OACA,QACA,eACA,cACA,gBACU;AACV,QAAM,WAAW,MAAM,MAAM;AAC7B,QAAM,aAAa,KAAK,IAAI,eAAe;AAE3C,QAAM,aAAa,OAChB,IAAI,CAAC,OAAO,aAAc,GAAE,OAAO,aACnC,OAAO,CAAC,MAAM,EAAE,QAAQ,gBACxB,KAAK,CAAC,IAAI,OAAO,GAAG,QAAQ,GAAG;AAElC,QAAM,eAAe,CAAC,MAAe,KAAK,eAAe,IAAI;AAC7D,QAAM,WAAqB;AAE3B,aAAW,QAAQ,CAAC,MAAM;AACxB,QAAI,SAAS,UAAU;AAAY;AACnC,UAAM,gBAAgB,EAAE;AACxB,aAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,EAAE,GAAG;AAC7C,YAAM,OAAM,IAAI,OAAO,EAAE,UAAU,SAAS;AAC5C,UAAI,SAAQ;AAAK;AACjB,QAAE,SAAS,aAAa;AACxB,UAAI,EAAE,SAAS;AAAgB;AAAA;AAEjC,QAAI,kBAAkB,EAAE,OAAO;AAC7B,eAAS,KAAK,EAAE;AAAA;AAAA;AAGpB,SAAO;AAAA;;;AClDT,2CAA2C,GAAgB;AACzD,QAAM,MAAM,AAAG,yBAAQ,AAAG,2BAAU,GAAG,CAAC,GAAG;AAE3C,QAAM,QAAQ;AAAA,IACZ,AAAG,qBAAI,IAAI,IAAI,IAAI;AAAA,IACnB,AAAG,qBAAI,IAAI,IAAI,IAAI;AAAA;AAErB,QAAM,UAAU;AAAA,IACd,AAAG,qBAAI,IAAI,IAAI,AAAG,qBAAI,MAAM,IAAI;AAAA,IAChC,AAAG,qBAAI,IAAI,IAAI,AAAG,qBAAI,MAAM,IAAI;AAAA;AAElC,SAAO,EAAE,OAAO;AAAA;AAGlB,0BAA0B,IAAiB,IAAiB;AAC1D,QAAM,EAAE,OAAO,YAAY,kCAAkC;AAE7D,QAAM,MAAM,AAAG,yBAAQ,AAAG,2BAAU,IAAI,CAAC,GAAG;AAC5C,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK;AACrE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK,QAAQ;AACtE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK;AACrE,QAAM,WAAW,AAAG,qBAAI,AAAG,qBAAI,AAAG,qBAAI,IAAI,IAAI,KAAK,MAAM,KAAK,QAAQ;AAEtE,SAAO,AAAG,2BACR,AAAG,uBAAM;AAAA,IACP,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,IACjB,AAAG,qBAAI,UAAU;AAAA,MAEnB,CAAC,GAAG;AAAA;AAID,qBAAqB,gBAA6B,kBAA+B,QAA2B;AACjH,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,YAAY,eAAe,MAAM;AAEvC,QAAI,QAAQ,iBACV,AAAG,yBAAQ,AAAG,sBAAK,OAAO,WAAW,CAAC,WAAW,GAAG,KAAK,CAAC,IAAI,KAC9D,AAAG,yBAAQ,gBAAgB,CAAC,IAAI;AAElC,YAAQ,AAAG,yBAAQ,OAAO,CAAC,WAAY,MAAM,MAAM,KAAK,WAAY;AAEpE,UAAM,mBAAmB,AAAG,yBAAQ,AAAG,uBAAM,kBAAkB,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI,IAAI;AACnF,QAAI,SAAS,AAAG,uBAAM,kBAAkB,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI,IAAI;AAE5D,aAAS,AAAG,yBAAQ,QAAQ,CAAC,WAAW,OAAO,MAAM;AAErD,UAAM,eAAe,AAAG,yBAAQ;AAChC,UAAM,gBAAgB,AAAG,yBAAQ;AAEjC,WAAO,EAAE,OAAO,cAAc,QAAQ;AAAA;AAAA;;;ACnDnC,4BACL,GACA,QACA;AACA,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,YAAY,EAAE,MAAM;AAC1B,UAAM,wBAAwB,AAAG,yBAC/B,UAAU,GAAG,OAAO,yBACpB,CAAC,WAAW,IAAI,GAAG;AAErB,UAAM,kBAAkB,AAAG,yBACzB,UAAU,GAAG,OAAO,kBACpB,CAAC,WAAW,IAAI;AAElB,WAAO,EAAE,uBAAuB;AAAA;AAAA;;;ACb7B,yBACL,GACA,QACA,QACA;AACA,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,QAAQ,mBAAmB,GAAG,OAAO,QAAQ,CAAC,GAAG;AACvD,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,SAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,QAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAC3D,UAAM,QAAQ,mBAAmB,OAAO,OAAO,QAAQ,CAAC,GAAG;AAE3D,UAAM,iBAAiB,mBAAmB,QAAQ,OAAO;AACzD,UAAM,iBAAiB,mBAAmB,GAAG,OAAO;AACpD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AACxD,UAAM,iBAAiB,mBAAmB,OAAO,OAAO;AAExD,UAAM,iBAAiB,AAAG,wBAAO;AAAA,MAC/B,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,OACd;AAEH,UAAM,mBAAmB,AAAG,wBAAO;AAAA,MACjC,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,MACf,eAAe;AAAA,OACd;AAEH,WAAO;AAAA,MACL;AAAA,MACA;AAAA;AAAA;AAAA;;;AC3CC,kCAA4B;AAAA,EAOjC,YAAY,EAAE,eAAe,eAAuC,IAAI;AAN9D,iBAAgB;AAOxB,SAAK,iBAAiB,iBAAiB;AACvC,SAAK,cAAc,cAAc;AAEjC,QAA
I,OAAO,KAAK,mBAAmB,YAAY,KAAK,kBAAkB,KAAK,KAAK,kBAAkB,GAAG;AACnG,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,QAAI,OAAO,KAAK,gBAAgB,UAAU;AACxC,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAAA;AAAA,MAIxB,gBAAwB;AAAE,WAAO,KAAK;AAAA;AAAA,MAEtC,aAAqB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACZlC,mCAA6B,cAAyB;AAAA,EAC3D,cAAc;AACZ,UAAM;AAAA;AAAA,EAGD,aAAa,OAAiB;AACnC,UAAM,EAAE,WAAW;AACnB,QAAI,CAAC;AAAQ,YAAM,IAAI,MAAM;AAC7B,WAAO,AAAG,sBAAK,MAAM;AACnB,YAAM,cAAc,AAAG,sBAAK,MAAM,cAAc,KAAK,QAAQ;AAC7D,YAAM,IAAI,AAAG,qBAAI,AAAG,qBAAI,aAAa,QAAQ;AAC7C,YAAM,WAAW,YAAY,GAAG,OAAO;AACvC,YAAM,EAAE,gBAAgB,qBAAqB,gBAAgB,SAAS,KAAK,SAAS,QAAQ,OAAO;AACnG,aAAO,YAAY,gBAAgB,kBAAkB,OAAO;AAAA;AAAA;AAAA,QAInD,QAAQ,OAAkB;AACrC,WAAO,KAAK,aAAa,MAAM,WAAW;AAAA;AAAA,QAG/B,YAAY,OAAkB,UAAkC,IAA8B;AACzG,UAAM,EAAE,YAAY,kBAAkB,IAAI,sBAAsB;AAChE,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,EAAE,OAAO,QAAQ,QAAQ,YAAY,KAAK,aAAa;AAC7D,UAAM,QAAQ,OAAO;AACrB,UAAM,SAAS,QAAQ;AACvB,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,aAAO,GAAG;AACV,cAAQ,GAAG;AAAA;AAEb,UAAM,aAAa,MAAM,KAAK,OAAO;AACrC,UAAM,eAAe;AACrB,UAAM,UAAU,mBAAkB,OAAO,YAAwB,YAAY,cAAc;AAC3F,UAAM,eAAe,SAAS,2BAA2B;AACzD,UAAM,YAAY,SAAS;AAC3B,UAAM,OAAO,YAAY,aAAa;AACtC,UAAM,OAAO,YAAY,aAAa;AACtC,UAAM,YAAY,MAAM;AACxB,UAAM,UAAU,QACb,IAAI,CAAC,QAAQ;AACZ,YAAM,CAAC,KAAK,UAAU;AAAA,QACpB,KAAK,IAAI,GAAG,UAAU,KAAK;AAAA,QAC3B,KAAK,IAAI,GAAK,UAAU,KAAK;AAAA,QAC7B,IAAI,CAAC,QAAQ,MAAM;AACrB,YAAM,CAAC,MAAM,SAAS;AAAA,QACpB,KAAK,IAAI,GAAG,UAAU,KAAK;AAAA,QAC3B,KAAK,IAAI,GAAK,UAAU,KAAK;AAAA,QAC7B,IAAI,CAAC,QAAQ,MAAM;AACrB,aAAO,IAAI,cACT,WAAW,MACX,IAAI,KAAK,MAAM,KAAK,QAAQ,MAAM,SAAS,MAC3C,EAAE,QAAQ,SAAS,eAAe,IAAI,OAAO,SAAS,cAAc;AAAA;AAG1E,UAAM;AACN,WAAO;AACP,WAAO;AAAA;AAAA,EAGC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B;AAAA;AAAA,EAG1B,cAAc,SAAuB;AAC7C,WAAO,eAAc;AAAA;AAAA;;;AC/ElB,8BAA8B,SAAuB;AAC1D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;AAGF,gCAAgC,SAAuB;AAC5D,SAAO,qBAAqB;AAAA;AAIvB,qCAA+B,eAAe;AAAA;;;ACd9C,IAAM,gBAAgB;AAEtB,IAAM,cAAc;AAAA,EACzB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,SAAS;AAAA,EACnB,IAAI,MAAM,SAAS;AAAA,EACnB,IAAI,MAAM,QAAQ;AAAA,EAClB,IAAI,MAAM,SAAS;AAAA;AAGd,IAAM,wBAAwB;AAAA,EACnC,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA;AAGf,IAAM,qBAA+C,CAAC,SAAS,SAAS;AAExE,IAAM,qBAAqB;AAC3B,IAAM,oCAAoC;;;ACVjD,IAAM,WAAW,CAAC,QAAa,OAAO,QAAQ;AAEvC,wBAAwB,QAAa;AAC1C,MAAI,CAAC,QAAQ;AACX,UAAM,IAAI,MAAM,mBAAmB;AAAA;AAGrC,MAAI,OAAO,OAAO,uBAAuB,WAAW;AAClD,UAAM,IAAI,MAAM,wDAAwD,OAAO;AAAA;AAGjF,MAAI,CAAC,SAAS,OAAO,iBAAiB,OAAO,eAAe,KAAK,OAAO,eAAe,GAAK;AAC1F,UAAM,IAAI,MAAM,gEAAgE,OAAO;AAAA;AAGzF,MACE,CAAC,MAAM,QAAQ,OAAO,YACnB,CAAC,OAAO,QAAQ,UAChB,CAAC,OAAO,QAAQ,MAAM,CAAC,MAAW,OAAO,MAAM,WAClD;AACA,UAAM,IAAI,MAAM,kEAAkE,KAAK,UAAU,OAAO;AAAA;AAG1G,MACE,CAAC,MAAM,QAAQ,OAAO,YACnB,CAAC,OAAO,QAAQ,UAChB,CAAC,OAAO,QAAQ,IAAI,CAAC,MAAW,KAAK,IAAI,MAAM,CAAC,MAAW,SAAS,EAAE,MAAM,SAAS,EAAE,KAC1F;AACA,UAAM,IAAI,MAAM,wEAAwE,KAAK,UAAU,OAAO;AAAA;AAGhH,MAAI,OAAO,WACT,EAAC,MAAM,QAAQ,OAAO,YACnB,OAAO,QAAQ,WAAW,KAC1B,CAAC,OAAO,QAAQ,MAAM,YACxB;AACD,UAAM,IAAI,MAAM,8EAA8E,KAAK,UAAU,OAAO;AAAA;AAAA;;;AC/CjH,eAAe,GAA6B;AACjD,SAAO,AAAG,sBAAK,MAAM;AACnB,UAAM,MAAM,AAAG,qBAAI,GAAG,AAAG,wBAAO;AAChC,WAAO,AAAG,qBAAI,AAAG,sBAAK,AAAG,qBAAI,GAAG,OAAO;AAAA;AAAA;;;ACApC,2BAA2B,GAAgB,QAAwC;AACxF,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,qBAAI,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG;AACjD,UAAM,AAAG,wBAAO,KAAK,OAAO,KAAK,SAAS,CAAC,GAAG,IAAI;AAClD,UAAM,AAAG,qBAAI,KAAK,OAAO,GAAG;AAC5B,UAAM,AAAG,qBAAI,KAAK,OAAO,GAAG;AAC5B,UAAM,AAAG,qBAAI,KAAK,OAAO,KAAK;AAC9B,WAAO,MAAM;AAAA;AAAA;;;ACPV,iCAAgC,GA
AgB,QAA0C;AAC/F,SAAO,AAAG,sBAAK,MAAM;AACnB,QAAI,MAAM,AAAG,qBAAI,GAAG,CAAC,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI,CAAC,GAAG;AACjD,UAAM,AAAG,iCAAgB,KAAK,OAAO,kBAAkB,OAAO,kBAAkB,CAAC,GAAG,IAAI;AACxF,UAAM,AAAG,qBAAI,KAAK,OAAO;AACzB,WAAO,MAAM;AAAA;AAAA;;;ACDjB,4BAA2B,gBAAwC,eAA+B;AAChG,QAAM,oBAAoB,yBAAyB,gBAAgB;AAEnE,kCAAgC,MAAc,cAAiC;AAC7E,UAAM,OAAM,AAAG,0BAAS,eAAe;AACvC,UAAM,UAAU,AAAG,0BAAS,eAAe;AAE3C,kBAAc,KACZ,EAAE,WAAW,GAAG,sBAChB,EAAE,WAAW,GAAG;AAElB,WAAO,EAAE,WAAK;AAAA;AAGhB,0CAAwC,YAAoB,aAAqB,cAAyC;AACxH,UAAM,QAAO,kBAAkB,YAAY,aAAa,GAAG,GAAG;AAC9D,UAAM,KAAK,uBAAuB,aAAa,GAAG;AAClD,WAAO,EAAE,aAAM;AAAA;AAEjB,QAAM,6BAA6B,kCAAkC,gBAAgB;AAErF,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,wBACL,SACA,QACA,iBACA,aACgE;AAChE,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,MACE,sBAAsB;AAE1B,QAAM,gBAAgC;AACtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,gBAAgB;AACtC,MAAI;AAEJ,MAAI,OAAO,oBAAoB;AAC7B,UAAM,CAAC,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,MAAM;AAC7C,UAAM,QAAQ,OAAO,qBACjB,kBAAkB,IAAI,IAAI,GAAG,WAC7B,2BAA2B,IAAI,IAAI;AACvC,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,SAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,2BAA2B,IAAI,IAAI;AACjD,UAAM,QAAQ,KAAK,2BAA2B,IAAI,IAAI,WAAW;AACjE,UAAM,QAAQ,KAAK,2BAA2B,IAAI,IAAI,WAAW;AACjE,UAAM,QAAQ,kBAAkB,MAAM,MAAM,IAAI,IAAI,iBAAiB,GAAG;AACxE,aAAS;AAAA,MACP;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA,SAErD;AACL,UAAM,CAAC,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,MAAM;AAC7C,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,SAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,+BAA+B,IAAI,IAAI;AACrD,UAAM,QAAQ,kBAAkB,IAAI,IAAI,iBAAiB,GAAG;AAC5D,aAAS;AAAA,MACP;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA,MAAO;AAAA;AAAA;AAG5D,MAAI,sBAAsB,WAAW,GAAG;AACtC,UAAM,IAAI,MAAM,kCAAkC,sBAAsB;AAAA;AAE1E,SAAO,EAAE,QAAQ;AAAA;;;AChFnB,4BAA2B,WAAgB,eAA+B;AACxE,QAAM,qBAAqB,0BAA0B,WAAW;AAEhE,kCAAgC,QAA2B;AACzD,UAAM,OAAM,mBAAmB,GAAG,cAAc;AAChD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,WAAO,EAAE,WAAK;AAAA;AAGhB,6BAA2B,QAA4B;AACrD,UAAM,UAAU,mBAAmB,GAAG,kBAAkB;AACxD,UAAM,OAAO,mBAAmB,GAAG,eAAe;AAClD,WAAO,EAAE,SAAS;AAAA;AAGpB,0CAAwC,QAAmC;AACzE,UAAM,QAAO,kBAAkB,GAAG;AAClC,UAAM,KAAK,uBAAuB,GAAG;AACrC,WAAO,EAAE,aAAM;AAAA;AAGjB,QAAM,6BAA6B,+BAA+B;AAClE,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA;AAAA;AAIG,qCACL,WACA,QACgE;AAChE,QAAM,gBAAgC;AAEtC,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,MACE,mBAAkB,WAAW;AAEjC,MAAI;AAEJ,MAAI,OAAO,oBAAoB;AAE7B,UAAM,aAAc,OAAO,eAAe,OAAO,YAAY,UAAU;AACvE,aAAS;AAAA,MACP,OAAO,OAAO,qBAAqB,kBAAkB,WAAW,2BAA2B;AAAA,MAC3F,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,2BAA2B;AAAA,MAClC,OAAO,aAAa,IAAI,2BAA2B,WAAW;AAAA,MAC9D,OAAO,aAAa,IAAI,2BAA2B,WAAW;AAAA,MAC9D,OAAO,kBAAkB;AAAA;AAAA,SAEtB;AACL,aAAS;AAAA,MACP,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,+BAA+B;AAAA,MACtC,OAAO,kBAAkB;AAAA;AAAA;AAI7B,6BAA2B,WAAW;AACtC,SAAO,EAAE,QAAQ;AAAA;;;AC7EZ,8BAAwB;AAAA,EAO7B,YAAY,EAAE,WAAW,mBAAuC,IAAI;AAN1D,iBAAgB;AAOxB,SAAK,aAAa,aAAa;AAC/B,SAAK,kBAAkB,kBAAkB;AAEzC,QAAI,OAAO,KAAK,eAAe,YAAY,KAAK,aAAa,OAAO,GAAG;AACrE,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAG1B,QAAI,OAAO,KAAK,oBAAoB,YAAY,KAAK,mBAAmB,KAAK,KAAK,mBAAmB,GAAG;AACtG,YAAM,IAAI,MAAM,GAAG,KAAK;AAAA;AAAA;AAAA,MAIxB,YAAoB;AAAE,WAAO,
KAAK;AAAA;AAAA,MAElC,iBAAyB;AAAE,WAAO,KAAK;AAAA;AAAA;;;ACJtC,oCAA6B,cAAmC;AAAA,EAKrE,YAAY,QAA0B;AACpC,UAAM;AACN,mBAAe;AACf,SAAK,UAAU;AAAA;AAAA,MAGN,SAA2B;AACpC,WAAO,KAAK;AAAA;AAAA,MAGH,kBAA2B;AACpC,WAAO,KAAK,OAAO,mBAAmB,KAAK,OAAO,QAAQ,SAAS;AAAA;AAAA,MAG1D,kBAA0B;AACnC,WAAO,IAAK,MAAK,kBAAkB,KAAK,OAAO,QAAQ,SAAS;AAAA;AAAA,EAG3D,cAAc,GAAgB,QAAiD;AACpF,QAAI,MAAM,kBAAkB,GAAG,OAAO;AACtC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,kBAAkB,KAAK,OAAO;AACpC,UAAM,kBAAkB,KAAK,OAAO;AACpC,WAAO,UAAU,KAAK,OAAO,OAAO,SAAS;AAAA;AAAA,EAGxC,aAAa,GAAgB,QAAsC;AACxE,QAAI,MAAM,KAAK,OAAO,qBAClB,MAAM,UAAU,GAAG,OAAO,OAAqB,SAAS,UACxD,wBAAuB,GAAG,OAAO;AACrC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,wBAAuB,KAAK,OAAO;AACzC,UAAM,AAAG,yBAAQ,KAAK,CAAC,GAAG,IAAI,CAAC,GAAG,IAAI;AACtC,UAAM,OAAO,QAAQ,wBAAuB,KAAK,OAAO,SAAS;AACjE,UAAM,OAAO,QAAQ,wBAAuB,KAAK,OAAO,SAAS;AACjE,WAAO,UAAU,KAAK,OAAO,OAAO,SAAS;AAAA;AAAA,EAGxC,aAAa,OAAiB,WAAgC;AACnE,UAAM,EAAE,WAAW;AAEnB,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI,MAAM;AAAA;AAGlB,WAAO,AAAG,sBAAK,MAAM;AACnB,UAAI,cAAc,AAAG,sBAAK,MAAM,cAAc,WAAW,QAAQ;AACjE,oBAAc,KAAK,OAAO,UACtB,UAAU,aAAa,KAAK,OAAO,WACnC;AACJ,oBAAc,YAAY,IAAI;AAC9B,aAAO,KAAK,OAAO,qBACf,KAAK,aAAa,aAAa,UAC/B,KAAK,cAAc,aAAa;AAAA;AAAA;AAAA,QAI3B,QAAQ,OAAkB,WAAyC;AAC9E,WAAO,KAAK,aAAa,MAAM,WAAW,QAAQ;AAAA;AAAA,QAGvC,OAAO,OAAkB,gBAAoC,IAAgC;AACxG,UAAM,EAAE,WAAW,mBAAmB,IAAI,kBAAkB;AAC5D,UAAM,WAAW,MAAM,WAAW;AAClC,UAAM,MAAM,MAAM,KAAK,aAAa,UAAU;AAC9C,UAAM,OAAO,AAAG,sBAAK,MAAM,AAAG,yBAAQ,KAAK,GAAG;AAC9C,UAAM,kBAAkB;AAAA,MACtB,OAAO,SAAS,cAAc;AAAA,MAC9B,QAAQ,SAAS,eAAe;AAAA;AAGlC,UAAM,UAAU,MAAM,KAAK,aAAa,MAAM,SAAS,2BAA2B,IAAI;AACtF,QAAI;AACJ,SAAK;AAEL,UAAM,QAAQ,QAAQ,IAAI,CAAC,QAAQ,IAAI;AACvC,UAAM,SAAS,QAAQ,IAAI,CAAC,QAAQ,IAAI;AACxC,UAAM,cAAc,QAAQ,IAAI,CAAC,QAAQ,IAAI;AAC7C,UAAM,aAAa,QAAQ,IAAI,CAAC,QAAQ,KAAK,OAAO,QAAQ,IAAI;AAEhE,UAAM,UAAU,kBACd,MAAM,IAAI,CAAC,QAAQ,IAAI,QAAQ,aAC/B,QACA,KAAK,OAAO,cACZ;AAGF,UAAM,aAAa,QAAQ,IAAI,CAAC,QAAQ,IAAI,gBAC1C,OAAO,MACP,YAAY,MACZ,WAAW,MACX,MAAM,MACN;AAEF,WAAO;AAAA;AAAA,EAGC,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8B;AACjE,WAAO,4BAA2B,WAAW,KAAK;AAAA;AAAA,EAG1C,cAAc,SAAuB;AAC7C,UAAM,cAAc,KAAK,OAAO,eAAe,gBAAe;AAE9D,UAAM,aAAa,cAAc,YAAY,SAAS;AACtD,QAAI,eAAe,KAAK,eAAe,KAAK,eAAe,GAAG;AAC5D,YAAM,IAAI,MAAM,oEAAoE;AAAA;AAEtF,WAAO,eAAc,SAAS,KAAK,QAAQ,KAAK,iBAAiB;AAAA;AAAA,QAGnD,aACd,cACA,qBACA,gBACA;AACA,UAAM,EAAE,OAAO,WAAW;AAC1B,UAAM,YAAY,KAAK,IAAI,OAAO;AAClC,UAAM,oBAAoB,YAAY;AACtC,UAAM,oBAAoB,YAAY;AAEtC,UAAM,WAAW,aAAa,MAAM;AACpC,UAAM,WAAW,KAAK,OAAO,QAAQ;AAErC,UAAM,CAAC,aAAa,cAAc,qBAAqB,AAAG,sBAAK,MAAM;AACnE,YAAM,WAAW,aAAa,QAAQ,CAAC,UAAU,UAAU,UAAU,KAAK;AAE1E,YAAM,QAAQ,SAAS,MAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU;AAC1E,YAAM,SAAS,SAAS,MAAM,CAAC,GAAG,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU;AAC3E,YAAM,cAAc,KAAK,kBACrB,AAAG,yBAAQ,SAAS,MAAM,CAAC,GAA
G,GAAG,GAAG,IAAI,CAAC,UAAU,UAAU,UAAU,KAAK,OAAO,QAAQ,UAAU,KACrG,AAAG,wBAAO;AACd,aAAO,CAAC,OAAO,QAAQ;AAAA;AAGzB,UAAM,UAAU;AAChB,UAAM,aAAa,MAAM,aAAa;AACtC,UAAM,YAAY,MAAM,YAAY;AACpC,aAAS,MAAM,GAAG,MAAM,UAAU,OAAO;AACvC,eAAS,MAAM,GAAG,MAAM,UAAU,OAAO;AACvC,iBAAS,SAAS,GAAG,SAAS,UAAU,UAAU;AAChD,gBAAM,QAAQ,QAAQ,WAAW,KAAK,KAAK,QAAQ;AACnD,cAAI,CAAC,kBAAkB,QAAQ,gBAAgB;AAC7C,kBAAM,MAAQ,OAAM,QAAQ,UAAU,KAAK,KAAK,QAAQ,OAAO,WAAY;AAC3E,kBAAM,MAAQ,OAAM,QAAQ,UAAU,KAAK,KAAK,QAAQ,OAAO,WAAY;AAC3E,kBAAM,aAAe,KAAK,IAAI,UAAU,KAAK,KAAK,QAAQ,MAAM,KAAK,OAAO,QAAQ,QAAQ,IAAK,WAAY;AAC7G,kBAAM,cAAgB,KAAK,IAAI,UAAU,KAAK,KAAK,QAAQ,MAAM,KAAK,OAAO,QAAQ,QAAQ,IAAK,WAAY;AAC9G,kBAAM,IAAK,MAAO,aAAa;AAC/B,kBAAM,IAAK,MAAO,cAAc;AAChC,kBAAM,MAAM,EAAE,KAAK,KAAK;AACxB,kBAAM,EAAE,YAAY,UAAU,KAAK,kBAC/B,MAAM,KAAK,sBAAsB,mBAAkC,OACnE,EAAE,YAAY,GAAG,OAAO;AAC5B,oBAAQ,KAAK;AAAA,cACX,KAAK,IAAI,YAAY,GAAG,GAAG,IAAI,YAAY,IAAI;AAAA,cAC/C;AAAA,cACA,YAAY,QAAQ;AAAA,cACpB;AAAA,iBACG;AAAA;AAAA;AAAA;AAAA;AAAA;AAOb,gBAAY;AACZ,iBAAa;AACb,sBAAkB;AAClB,WAAO;AAAA;AAAA,QAGK,sBAAsB,eAA4B,KAAmD;AACjH,UAAM,EAAE,KAAK,KAAK,WAAW;AAC7B,UAAM,cAAc,MAAM,cAAc;AACxC,WAAO,MAAM,KAAK,OAAO,QAAQ,QAAQ,KAAK,GAC3C,IAAI,CAAC,GAAG,MAAM,YAAY,KAAK,KAAK,QAAQ,IAC5C,IAAI,CAAC,YAAY,UAAW;AAAA,MAC3B;AAAA,MACA;AAAA,QAED,OAAO,CAAC,KAAK,SAAU,IAAI,aAAa,KAAK,aAAa,MAAM;AAAA;AAAA;AA/MhE;AACS,AADT,eACS,uBAAuB,CAAC,GAAG,IAAI,IAAI,IAAI,KAAK,KAAK,KAAK,MAAM;;;ACPrE,+BAAyB,eAAe;AAAA,EAC7C,YAAY,qBAA8B,MAAM;AAC9C,UAAM,SAAS;AAAA,MACb;AAAA,MACA,cAAc;AAAA,MACd,SAAS,CAAC;AAAA,SACN,qBACA;AAAA,QACA,SAAS;AAAA,QACT,SAAS;AAAA,UAET;AAAA,QACA,SAAS;AAAA,QACT,iBAAiB;AAAA;AAAA;AAIvB,UAAM;AAAA;AAAA,MAGG,qBAA8B;AACvC,WAAO,KAAK,OAAO;AAAA;AAAA,MAGV,UAAmB;AAC5B,WAAO,KAAK,OAAO;AAAA;AAAA,QAGR,YAAY,OAAkB,eAA6D;AACtG,UAAM,mBAAmB,MAAM,KAAK,OAAO,OAAO;AAClD,WAAO,iBAAiB,IAAI,CAAC,QAAQ,IAAI,cAAc,IAAI,OAAO,IAAI,aAAa,EAAE,OAAO,IAAI,YAAY,QAAQ,IAAI;AAAA;AAAA,EAGhH,sBAA8B;AACtC,WAAO,KAAK,qBAAqB,oCAAoC;AAAA;AAAA,EAG7D,2BAA2B,WAA8F;AACjI,WAAO,MAAM,2BAA2B;AAAA;AAAA;;;AChDrC,0BAA0B,SAAuB,qBAA8B,MAAM;AAC1F,QAAM,MAAM,IAAI,WAAW;AAC3B,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACNF,4CAAsC,kBAAkB;AAAA,EAAxD,cAJP;AAIO;AACK,iBAAgB;AAAA;AAAA;;;ACLrB,2BAAwB;AAAA,QAEhB,KAAK,aAA2D;AAC3E,WAAO,YAAY,MAAM,KAAK;AAAA;AAAA,QAGnB,MAAkB;AAC7B,UAAM,IAAI,MAAM;AAAA;AAAA;;;ACApB,gDACE,eACA,OAEA,gBACA,gBAEA,sBAAwF,CAAC,EAAE,kBAAkB,aAC7G;AACA,QAAM,YAAY,cAAc,IAAI,CAAC,iBAAkB,oBAAoB,gBACvE,oBAAoB,gBACpB,aAAa;AACjB,QAAM,QAAgD,kBACpD,kBAAoB,0BAChB,MAAM,mBAAmB,OAAO,aAChC,MAAM,aAAa,OAAO;AAEhC,QAAM,UAAU,MAAM,eAAe;AACrC,QAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,SAAO;AAAA;AAGT,iDACE,cACA,OAEA,eACA,gBAEA,qBACA;AACA,SAAO,iCACL,CAAC,eACD,OACA,OAAO,UAAU,cAAc,MAAM,KACrC,gBACA;AAAA;;;ACzCG,IAAM,iBAAgB;AAEtB,IAAM,eAAc;AAAA,EACzB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA,EACpB,IAAI,MAAM,UAAU;AAAA;AAGf,IAAM,WAAqC,CAAC,SAAS,SAAS;;;ACF9D,qCAA+B,eAAe;AAAA,EACnD,cAAc;AACZ,UAAM,SAAS;AAAA,MACb,oBAAoB;AAAA,MACpB,cAAc;AAAA,MACd,SAAS,CAAC;AAAA,MACV,SAAS;AAAA,MACT,SAAS;AAAA,MACT,oBAAoB;AAAA,MACpB,aAAa,CAAC,GAAG,IAAI,IAAI,IAAI,KAAK,KAAK;AAAA;AAGzC,UAAM;AAAA;AAAA,MAGG,UAAmB;AAC5B,WAAO,KAAK,OAAO;AAAA;AAAA,QAGR,YAAY,OAAkB,eAA6D;AACtG,UAAM,mBAAmB,MAAM,KAAK,OAAO,OAAO;AAClD,WAAO,iBAAiB,IAAI,CAAC,QAAQ,IAAI,cAAc,IAAI,OAAO,IAAI,aAAa,EAAE,OAAO,IAAI,YAAY,QAAQ,IAAI;AAAA;AAAA,EAGhH,sBAA8B;AACtC,WAAO;AAAA;AAAA,EAGC,2BAA2B,WAA8F;AACjI,WAAO,MAAM,2BAA2B;AAAA;AAAA;;;ACvBrC,IAAM,OAAO;AAAA,EAClB,gBAAgB,IAAI;AAAA,EACpB,kBAAkB,IAAI;AAAA,EACtB,YAAY,IAAI;AAAA,EAChB,mBAAmB,IAAI;AAAA,EACvB,uBAAuB,IAAI;AAAA,EAC3B,oBAAoB,IAAI;AAAA,EACxB,mBAAmB,IAAI;AAAA,EACvB,cAAc,IAAI;AAAA;AAUb,IAAM
,iBAAiB,CAAC,OAAkB,YAA6D,KAAK,eAAe,YAAY,OAAO;AAS9I,IAAM,mBAAmB,CAAC,OAAkB,YAA+D,KAAK,iBAAiB,YAAY,OAAO;AASpJ,IAAM,aAAa,CAAC,OAAkB,YAA0D,KAAK,WAAW,YAAY,OAAO;AASnI,IAAM,sBAAsB,CAAC,UAAmE,KAAK,kBAAkB,gBAAgB;AAWvI,IAAM,0BAA0B,CAAC,UAAmE,KAAK,sBAAsB,gBAAgB;AAY/I,IAAM,wBAAwB,CAAC,UAA6D,KAAK,mBAAmB,sBAAsB;AAS1I,IAAM,2BAA2B,CAAC,UAAmE,KAAK,kBAAkB,mBAAmB;AAS/I,IAAM,sBAAsB,CAAC,UAAiF,KAAK,aAAa,oBAAoB;AAEpJ,IAAM,0BAA0B,CAAC,QAAgB,KAAK,eAAe,KAAK;AAC1E,IAAM,4BAA4B,CAAC,QAAgB,KAAK,iBAAiB,KAAK;AAC9E,IAAM,sBAAsB,CAAC,QAAgB,KAAK,WAAW,KAAK;AAClE,IAAM,wBAAwB,CAAC,QAAgB,KAAK,kBAAkB,KAAK;AAC3E,IAAM,4BAA4B,CAAC,QAAgB,KAAK,sBAAsB,KAAK;AACnF,IAAM,2BAA2B,CAAC,QAAgB,KAAK,mBAAmB,KAAK;AAC/E,IAAM,0BAA0B,CAAC,QAAgB,KAAK,kBAAkB,KAAK;AAC7E,IAAM,qBAAqB,CAAC,QAAgB,KAAK,aAAa,KAAK;AAGnE,IAAM,yBAAyB;AAC/B,IAAM,cAAc;AACpB,IAAM,kBAAkB;;;ACtGxB,mDAAqE,eAAwB;AAAA,EAClG,YAEY,YAEA,OAEA,gBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA;AAMP,kDAAmF,+BAA0E;AAAA,QACrJ,MAA+C;AAC1D,UAAM,gBAAgB,MAAM,KAAK;AAEjC,UAAM,wBAAwB,MAAM,iCAClC,eACA,KAAK,OACL,OAAO,UAAU,QAAQ,IACvB,MAAM,IAAI,CAAC,SAAS,KAAK,kBAAkB,mBAAmB,SAEhE,KAAK;AAGP,WAAO,cAAc,IACnB,CAAC,cAAc,MAAM,0BAAmC,cAAc,sBAAsB;AAAA;AAAA,EAIhG,mBAAmB;AACjB,WAAO,IAAI,2BAA2B,MAAM,KAAK;AAAA;AAAA;AAI9C,qDAAsF,+BAA8F;AAAA,QAC5K,MAAyD;AACpE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAGT,UAAM,kBAAkB,MAAM,kCAC5B,cACA,KAAK,OACL,CAAC,SAAS,KAAK,kBAAkB,mBAAmB,OACpD,KAAK;AAGP,WAAO,0BAA0B,cAAc;AAAA;AAAA,EAGjD,mBAAmB;AACjB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,mEAAuH,8BAAuC;AAAA,EACnK,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA,EAGpE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,sEAA0H,iCAA0C;AAAA,EACzK,mBAAmB;AACjB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACzEnD,gDAAkE,eAAwB;AAAA,EAC/F,YAEY,YAEA,OAEA,gBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA;AAMP,+CAAgF,4BAAuE;AAAA,QAC/I,MAA+C;AAC1D,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,qBAAqB,MAAM,iCAC/B,eACA,KAAK,OACL,OAAO,UAAU,QAAQ,IAAI,MAAM,IAAI,CAAC,SAAS,KAAK,aAAa,oBAAoB,SACvF,KAAK;AAEP,WAAO,cAAc,IAAI,CAAC,cAAc,MAAM;AAC5C,YAAM,EAAE,KAAK,QAAQ,sBAAsB,mBAAmB;AAC9D,aAAO,cAAc,iBAAiB,cAAc,QAAQ,oBAAoB;AAAA;AAAA;AAAA,EAIpF,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,kDAAmF,4BAA2F;AAAA,QACtK,MAAyD;AACpE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC;AAAc,aAAO;AAC1B,UAAM,EAAE,KAAK,QAAQ,sBAAsB,MAAM,kCAC/C,cACA,KAAK,OACL,CAAC,SAAS,KAAK,aAAa,oBAAoB,OAChD,KAAK;AAEP,WAAO,cAAc,iBAAiB,cAAc,QAAQ,oBAAoB;AAAA;AAAA,EAGlF,sBAAsB;AACpB,WAAO,IAAI,iCAAiC,MAAM,KAAK;AAAA;AAAA;AAIpD,gEAAoH,2BAAoC;AAAA,EAC7J,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,mEAAuH,8BAAuC;AAAA,EACnK,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACvEnD,mDAAqE,eAAwB;AAAA,EAClG,YAEY,YAEA,OACV;AACA;AAJU;AAEA;AAAA;AAAA;AAMP,kDAAsG,+BAAyE;AAAA,QACvK,MAA8C;AACzD,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,cAAc,MAAM,iCACxB,eACA,KAAK,OACL,CAAC,UAAU,QAAQ,IAAI,MAAM,IAAI,CAAC,SAAS,KAAK,mBAAmB,sBAAsB,SACzF,MACA,CAAC,iBAAiB,aAAa,UAAU,MAAM,MAAM,EAAE,kBAAkB;AAE3E,WAAO,YAAY,IAAI,CAAC,YAAY,MAAM,yBAAkC,cAAc,IAAI;AAAA;AAAA,EAGhG,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA;AAI/D,oDAAwG,+BAA6F;AAAA,QAC7L,MAAwD;AACnE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAET,UAAM,aAAa,MAAM,kCACvB,cACA,KAAK,OACL,CAAC,SAAS,KAAK,mBAAmB,sBAAsB,OACxD,MAEA,CAAC,kBAAiB,cAAa,UAAU,MAAM,MAAM,EAAE,kBAAkB;AAG3E,WAAO,yBAAyB,cAAc;AAAA;AAAA,EAGhD,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,mBAAmB;AACjB,W
AAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA;;;ACpDlE,gDAAkE,eAAwB;AAAA,EAC/F,YAEY,YAEA,OAEA,oBACV;AACA;AANU;AAEA;AAEA;AAAA;AAAA,MAKE,cAAyD;AACrE,WAAO,KAAK,qBACR,KAAK,wBACL,KAAK;AAAA;AAAA;AAIN,+CAAgF,4BAAqE;AAAA,QAC7I,MAA6C;AACxD,UAAM,gBAAgB,MAAM,KAAK;AACjC,UAAM,aAAa,cAAc,IAAI,CAAC,QAAQ,IAAI;AAClD,UAAM,QAAgD,KAAK,iBAAoB,0BAC3E,MAAM,mBAAmB,KAAK,OAAO,cACrC,MAAM,aAAa,KAAK,OAAO;AACnC,UAAM,sBAAsB,MAAM,QAAQ,IACxC,MAAM,IAAI,CAAC,SAAS,KAAK,YAAY,gBAAgB;AAEvD,UAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,WAAO,cAAc,IAAI,CAAC,cAAc,MAAM,wBAAiC,cAAc,oBAAoB;AAAA;AAAA,EAGnH,sBAAsB;AACpB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,mBAAmB;AACjB,WAAO,IAAI,4CAA4C,MAAM,KAAK;AAAA;AAAA,EAGpE,sBAAsB;AACpB,WAAO,IAAI,8BAA8B,MAAM,KAAK;AAAA;AAAA;AAIjD,kDAAmF,4BAAyF;AAAA,QACpK,MAAuD;AAClE,UAAM,eAAe,MAAM,KAAK;AAChC,QAAI,CAAC,cAAc;AACjB,aAAO;AAAA;AAET,UAAM,EAAE,cAAc;AACtB,UAAM,QAAgD,KAAK,iBAAoB,0BAC3E,MAAM,mBAAmB,KAAK,OAAO,CAAC,cACtC,MAAM,aAAa,KAAK,OAAO,CAAC;AACpC,UAAM,YAAY,MAAM,KAAK,YAAY,gBAAgB,MAAM;AAC/D,UAAM,QAAQ,CAAC,MAAM,aAAgB,2BAAU,EAAE;AACjD,WAAO,wBAAiC,cAAc;AAAA;AAAA,EAGxD,sBAAsB;AACpB,WAAO,IAAI,kDAAkD,MAAM,KAAK;AAAA;AAAA,EAG1E,mBAAmB;AACjB,WAAO,IAAI,+CAA+C,MAAM,KAAK;AAAA;AAAA,EAGvE,qBAAqB;AACnB,WAAO,IAAI,gCAAgC,MAAM,KAAK;AAAA;AAAA;;;ACvEnD,wCAA2C,eAAwB;AAAA,EAExE,YAAsB,OAA4B,UAAgC,IAAI,yBAAyB;AAC7G;AADoB;AAA4B;AAAA;AAAA;AAK7C,uCAAiC,oBAAqC;AAAA,QAC9D,MAAgC;AAC3C,UAAM,EAAE,OAAO,YAAY;AAC3B,QAAI;AACJ,QAAI,mBAAmB;AAAyB,eAAS,KAAK,iBAAiB,YAAY,OAAO;AAAA,aACzF,mBAAmB;AAAuB,eAAS,KAAK,eAAe,YAAY,OAAO;AAAA,aAC1F,mBAAmB;AAAmB,eAAS,KAAK,WAAW,YAAY,OAAO;AAAA;AACtF,YAAM,IAAI,MAAM;AACrB,WAAO;AAAA;AAAA,EAGD,iCAAmE;AACzE,WAAO,IAAI,QAAiC,CAAC,SAAS,WAAW;AAC/D,WAAK,MACF,KAAK,CAAC,eAAe,QAAQ,WAAW,IAAI,CAAC,cAAc,wBAAwB,IAAI,cACvF,MAAM,CAAC,QAAQ,OAAO;AAAA;AAAA;AAAA,EAI7B,kBAAkB,qBAA8B,OAAO;AACrD,WAAO,IAAI,2BACT,KAAK,kCACL,KAAK,OACL;AAAA;AAAA,EAIJ,sBAAsB;AACpB,WAAO,IAAI,8BACT,KAAK,kCACL,KAAK;AAAA;AAAA,EAIT,mBAAmB;AACjB,WAAO,IAAI,2BACT,KAAK,kCACL,KAAK;AAAA;AAAA;AAKJ,yCAAmC,oBAA+C;AAAA,QAC1E,MAA0C;AACrD,UAAM,iBAAiB,MAAM,IAAI,mBAAmB,KAAK,OAAO,KAAK;AACrE,QAAI,gCAAgC,eAAe;AACnD,mBAAe,QAAQ,CAAC,kBAAkB;AACxC,UAAI,cAAc,QAAQ,8BAA8B;AAAO,wCAAgC;AAAA;AAEjG,WAAO;AAAA;AAAA,EAGD,gCAA4E;AAElF,WAAO,IAAI,QAA2C,OAAO,YAAY;AACvE,YAAM,YAAY,MAAM,KAAK;AAC7B,cAAQ,YAAY,wBAA4B,IAAI,aAAa;AAAA;AAAA;AAAA,EAIrE,kBAAkB,qBAA8B,OAAO;AACrD,WAAO,IAAI,8BACT,KAAK,iCACL,KAAK,OACL;AAAA;AAAA,EAIJ,sBAAsB;AACpB,WAAO,IAAI,iCACT,KAAK,iCACL,KAAK;AAAA;AAAA,EAIT,mBAAmB;AACjB,WAAO,IAAI,8BACT,KAAK,iCACL,KAAK;AAAA;AAAA;;;AC9FJ,0BAA0B,OAAkB,UAAgC,IAAI,yBAA+C;AACpI,SAAO,IAAI,qBAAqB,OAAO;AAAA;AAGlC,wBAAwB,OAAkB,UAAgC,IAAI,yBAA6C;AAChI,SAAO,IAAI,mBAAmB,OAAO;AAAA;;;ACJvC,sCAA6C,OAAkB,eAAiG;AAC9J,SAAO,eAAe,OAAO,IAAI,sBAAsB,gBAAgB,EAAE,kBAAkB,KACxF,oBACA;AAAA;AAGL,kCAAyC,OAAkB,gBAAoC,IAA6E;AAC1K,SAAO,eAAe,OAAO,IAAI,kBAAkB,gBAChD,oBACA;AAAA;AAGE,IAAM,WAAW;;;AClBjB,2BAA2B,MAA+B,MAA+B;AAC9F,MAAI,KAAK,WAAW,KAAK;AAAQ,UAAM,IAAI,MAAM;AAEjD,QAAM,QAAQ,MAAM,KAAK;AACzB,QAAM,QAAQ,MAAM,KAAK;AAEzB,SAAO,KAAK,KACV,MACG,IAAI,CAAC,KAAK,MAAM,MAAM,MAAM,IAC5B,OAAO,CAAC,KAAK,SAAS,MAAO,QAAQ,GAAI;AAAA;;;ACJzC,wBAAkB;AAAA,EAKvB,YACE,QACA,oBAA4B,KAC5B;AACA,SAAK,qBAAqB;AAE1B,UAAM,aAAa,MAAM,QAAQ,UAAU,SAAS,CAAC;AAErD,QAAI,CAAC,WAAW,QAAQ;AACtB,YAAM,IAAI,MAAM;AAAA;AAGlB,QAAI,QAAQ;AACZ,UAAM,oBAAoB,MAAM,UAAU;AAE1C,SAAK,sBAAsB,WAAW,IAAI,CAAC,SAAS;AAClD,UAAI,gBAAgB,wBAAwB;AAC1C,eAAO;AAAA;AAGT,UAAI,gBAAgB,cAAc;AAChC,eAAO,IAAI,uBAAuB,qBAAqB,CAAC;AAAA;AAG1D,UAAI,KAAK,cAAc,KAAK,sBAAsB,cAAc;AAC9D,eAAO,IAAI,uBAAuB,qBAAqB,CAAC,KAAK;AAAA;AAG/D,YAAM,IAAI,MAAM;AAAA;AAAA;AAAA,MAIT,qBAA+C;AAAE,WAAO,KAAK;AAAA;AAAA,MAE7D,oBAA4B;AAAE,WAAO,KAAK;AAAA;AAAA,EAE
9C,oBAAoB,iBAA+B,aAAqC;AAC7F,WAAO,YACJ,IAAI,CAAC,MAAM,kBAAkB,GAAG,kBAChC,OAAO,CAAC,IAAI,OAAO,KAAK,IAAI,KACxB,aAAY,UAAU;AAAA;AAAA,EAGxB,gBAAgB,iBAA0C;AAC/D,WAAO,KAAK,mBACT,IAAI,CAAC,EAAE,aAAa,YAAY,IAAI,UACnC,OACA,KAAK,oBAAoB,iBAAiB,eAE3C,OAAO,CAAC,MAAM,SAAU,KAAK,WAAW,KAAK,WAAW,OAAO;AAAA;AAAA,EAG7D,cAAc,iBAA0C;AAC7D,UAAM,YAAY,KAAK,gBAAgB;AACvC,WAAO,UAAU,WAAW,KAAK,oBAC7B,YACA,IAAI,UAAU,WAAW,UAAU;AAAA;AAAA,EAGlC,SAAc;AACnB,WAAO;AAAA,MACL,mBAAmB,KAAK;AAAA,MACxB,oBAAoB,KAAK,mBAAmB,IAAI,CAAC,OAAO,GAAG;AAAA;AAAA;AAAA,SAIjD,SAAS,MAAwB;AAC7C,UAAM,qBAAqB,KAAK,mBAC7B,IAAI,CAAC,OAAY,uBAAuB,SAAS;AACpD,WAAO,IAAI,YAAY,oBAAoB,KAAK;AAAA;AAAA;;;AC1E7C,gCAAgC,SAAuB;AAC5D,QAAM,MAAM,IAAI;AAChB,MAAI,eAAe;AACnB,SAAO;AAAA;;;ACFF,uBAA0B,SAAY,YAA4B;AACvE,QAAM,EAAE,OAAO,WAAW,IAAI,WAAW,WAAW,OAAO,WAAW;AAEtE,MAAI,SAAS,KAAK,UAAU,GAAG;AAC7B,UAAM,IAAI,MAAM,uCAAuC,KAAK,UAAU,EAAE,OAAO;AAAA;AAGjF,MAAI,MAAM,QAAQ,UAAU;AAE1B,WAAQ,QAAuB,IAAI,CAAC,QAAQ,cAAc,KAAK,EAAE,OAAO;AAAA;AAG1E,MAAI,oBAAoB,UAAU;AAChC,UAAM,mBAAmB,QAAQ,UAAU,QAAQ,OAAO;AAC1D,UAAM,mBAAmB,QAAQ,mBAAmB,QAAQ,iBAAiB,IAAI,OAAO,iBAAiB,IAAI;AAC7G,WAAO,wBAAwB,wBAAwB,SAAS,mBAAmB;AAAA;AAGrF,MAAI,oBAAoB,UAAU;AAChC,WAAO,wBAAwB,SAAS,QAAQ,UAAU,QAAQ,OAAO;AAAA;AAG3E,MAAI,mBAAmB,iBAAiB,mBAAmB,eAAe;AACxE,WAAQ,QAAgB,QAAQ,OAAO;AAAA;AAGzC,SAAO;AAAA;;;ACRT,IAAM,OAAQ,OAAO,YAAY;AACjC,IAAM,WAAW,OAAO,cAAc,eAAiB,OAAO,UAAU,cAAc;AAC/E,IAAM,WAAU,EAAE,SAAa,SAAmB,MAAM;", "names": [] } diff --git a/dist/face-api.esm.js b/dist/face-api.esm.js index 92073fe..e70bac3 100644 --- a/dist/face-api.esm.js +++ b/dist/face-api.esm.js @@ -13,7 +13,7 @@ var __require = (x) => { }; var __export = (target, all5) => { for (var name in all5) - __defProp(target, name, {get: all5[name], enumerable: true}); + __defProp(target, name, { get: all5[name], enumerable: true }); }; // dist/tfjs.esm.js @@ -495,29 +495,29 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor; var __getOwnPropNames = Object.getOwnPropertyNames; var __getProtoOf = Object.getPrototypeOf; var __hasOwnProp = Object.prototype.hasOwnProperty; -var __markAsModule = (target) => __defProp2(target, "__esModule", {value: true}); +var __markAsModule = (target) => __defProp2(target, "__esModule", { value: true }); var __require2 = (x) => { if (typeof __require !== "undefined") return __require(x); throw new Error('Dynamic require of "' + x + '" is not supported'); }; var __commonJS = (cb, mod4) => function __require22() { - return mod4 || (0, cb[Object.keys(cb)[0]])((mod4 = {exports: {}}).exports, mod4), mod4.exports; + return mod4 || (0, cb[Object.keys(cb)[0]])((mod4 = { exports: {} }).exports, mod4), mod4.exports; }; var __export2 = (target, all5) => { for (var name in all5) - __defProp2(target, name, {get: all5[name], enumerable: true}); + __defProp2(target, name, { get: all5[name], enumerable: true }); }; var __reExport = (target, module2, desc) => { if (module2 && typeof module2 === "object" || typeof module2 === "function") { for (let key of __getOwnPropNames(module2)) if (!__hasOwnProp.call(target, key) && key !== "default") - __defProp2(target, key, {get: () => module2[key], enumerable: !(desc = __getOwnPropDesc(module2, key)) || desc.enumerable}); + __defProp2(target, key, { get: () => module2[key], enumerable: !(desc = __getOwnPropDesc(module2, key)) || desc.enumerable }); } return target; }; var __toModule = (module2) => { - return __reExport(__markAsModule(__defProp2(module2 != null ? __create(__getProtoOf(module2)) : {}, "default", module2 && module2.__esModule && "default" in module2 ? 
{get: () => module2.default, enumerable: true} : {value: module2, enumerable: true})), module2); + return __reExport(__markAsModule(__defProp2(module2 != null ? __create(__getProtoOf(module2)) : {}, "default", module2 && module2.__esModule && "default" in module2 ? { get: () => module2.default, enumerable: true } : { value: module2, enumerable: true })), module2); }; var require_browser = __commonJS({ "(disabled):node_modules/.pnpm/node-fetch@2.6.1/node_modules/node-fetch/browser.js"() { @@ -1021,7 +1021,7 @@ var require_seedrandom = __commonJS({ var global2 = this, width = 256, chunks = 6, digits = 52, rngname = "random", startdenom = math.pow(width, chunks), significance = math.pow(2, digits), overflow = significance * 2, mask = width - 1, nodecrypto; function seedrandom5(seed, options, callback) { var key = []; - options = options == true ? {entropy: true} : options || {}; + options = options == true ? { entropy: true } : options || {}; var shortseed = mixkey(flatten4(options.entropy ? [seed, tostring(pool3)] : seed == null ? autoseed() : seed, 3), key); var arc4 = new ARC4(key); var prng = function() { @@ -1657,7 +1657,7 @@ var require_seedrandom3 = __commonJS({ var width = 256, chunks = 6, digits = 52, rngname = "random", startdenom = math.pow(width, chunks), significance = math.pow(2, digits), overflow = significance * 2, mask = width - 1, nodecrypto; function seedrandom5(seed, options, callback) { var key = []; - options = options == true ? {entropy: true} : options || {}; + options = options == true ? { entropy: true } : options || {}; var shortseed = mixkey(flatten4(options.entropy ? [seed, tostring(pool3)] : seed == null ? autoseed() : seed, 3), key); var arc4 = new ARC4(key); var prng = function() { @@ -2082,7 +2082,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ return func2; } function ccall(ident, returnType, argTypes, args, opts) { - var toC = {"string": function(str) { + var toC = { "string": function(str) { var ret2 = 0; if (str !== null && str !== void 0 && str !== 0) { var len = (str.length << 2) + 1; @@ -2094,7 +2094,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ var ret2 = stackAlloc(arr.length); writeArrayToMemory(arr, ret2); return ret2; - }}; + } }; function convertReturnValue(ret2) { if (returnType === "string") return UTF8ToString(ret2); @@ -2257,7 +2257,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ if (Module["wasmMemory"]) { wasmMemory = Module["wasmMemory"]; } else { - wasmMemory = new WebAssembly.Memory({"initial": INITIAL_MEMORY / 65536, "maximum": 2147483648 / 65536, "shared": true}); + wasmMemory = new WebAssembly.Memory({ "initial": INITIAL_MEMORY / 65536, "maximum": 2147483648 / 65536, "shared": true }); if (!(wasmMemory.buffer instanceof SharedArrayBuffer)) { err("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"); if (ENVIRONMENT_IS_NODE) { @@ -2281,9 +2281,9 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ var runtimeInitialized = false; var runtimeExited = false; if (!ENVIRONMENT_IS_PTHREAD) - __ATINIT__.push({func: function() { + __ATINIT__.push({ func: function() { ___wasm_call_ctors(); - }}); + } }); function preRun() { if (ENVIRONMENT_IS_PTHREAD) return; @@ -2406,7 +2406,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ function getBinaryPromise() { if (!wasmBinary && (ENVIRONMENT_IS_WEB || 
ENVIRONMENT_IS_WORKER)) { if (typeof fetch === "function" && !isFileURI(wasmBinaryFile)) { - return fetch(wasmBinaryFile, {credentials: "same-origin"}).then(function(response) { + return fetch(wasmBinaryFile, { credentials: "same-origin" }).then(function(response) { if (!response["ok"]) { throw "failed to load wasm binary file at '" + wasmBinaryFile + "'"; } @@ -2429,7 +2429,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ }); } function createWasm() { - var info = {"a": asmLibraryArg}; + var info = { "a": asmLibraryArg }; function receiveInstance(instance, module22) { var exports3 = instance.exports; Module["asm"] = exports3; @@ -2461,7 +2461,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ } function instantiateAsync() { if (!wasmBinary && typeof WebAssembly.instantiateStreaming === "function" && !isDataURI(wasmBinaryFile) && !isFileURI(wasmBinaryFile) && typeof fetch === "function") { - return fetch(wasmBinaryFile, {credentials: "same-origin"}).then(function(response) { + return fetch(wasmBinaryFile, { credentials: "same-origin" }).then(function(response) { var result = WebAssembly.instantiateStreaming(response, info); return result.then(receiveInstantiatedSource, function(reason) { err("wasm streaming compile failed: " + reason); @@ -2485,13 +2485,13 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ instantiateAsync().catch(readyPromiseReject); return {}; } - var ASM_CONSTS = {9816: function() { + var ASM_CONSTS = { 9816: function() { throw "Canceled!"; }, 9834: function($0, $1) { setTimeout(function() { __emscripten_do_dispatch_to_thread($0, $1); }, 0); - }}; + } }; function initPthreadsJS() { PThread.initRuntime(); } @@ -2556,7 +2556,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ if (!pthread_ptr) throw "Internal Error! 
Null pthread_ptr in cancelThread!"; var pthread = PThread.pthreads[pthread_ptr]; - pthread.worker.postMessage({"cmd": "cancel"}); + pthread.worker.postMessage({ "cmd": "cancel" }); } function cleanupThread(pthread_ptr) { if (ENVIRONMENT_IS_PTHREAD) @@ -2570,7 +2570,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ PThread.returnWorkerToPool(worker); } } - var PThread = {unusedWorkers: [], runningWorkers: [], initMainThreadBlock: function() { + var PThread = { unusedWorkers: [], runningWorkers: [], initMainThreadBlock: function() { var pthreadPoolSize = Math.min(4, Math.max(1, (navigator.hardwareConcurrency || 1) / 2)); for (var i = 0; i < pthreadPoolSize; ++i) { PThread.allocateUnusedWorker(); @@ -2610,12 +2610,12 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ if (tb) { PThread.runExitHandlersAndDeinitThread(tb, exitCode); if (ENVIRONMENT_IS_PTHREAD) { - postMessage({"cmd": "exit"}); + postMessage({ "cmd": "exit" }); } } }, threadCancel: function() { PThread.runExitHandlersAndDeinitThread(_pthread_self(), -1); - postMessage({"cmd": "cancelDone"}); + postMessage({ "cmd": "cancelDone" }); }, terminateAllThreads: function() { for (var t in PThread.pthreads) { var pthread = PThread.pthreads[t]; @@ -2736,7 +2736,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ }; if (ENVIRONMENT_IS_NODE) { worker.on("message", function(data) { - worker.onmessage({data}); + worker.onmessage({ data }); }); worker.on("error", function(data) { worker.onerror(data); @@ -2744,7 +2744,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ worker.on("exit", function(data) { }); } - worker.postMessage({"cmd": "load", "urlOrBlob": Module["mainScriptUrlOrBlob"] || _scriptDir, "wasmMemory": wasmMemory, "wasmModule": wasmModule}); + worker.postMessage({ "cmd": "load", "urlOrBlob": Module["mainScriptUrlOrBlob"] || _scriptDir, "wasmMemory": wasmMemory, "wasmModule": wasmModule }); }, allocateUnusedWorker: function() { var pthreadMainJs = locateFile("tfjs-backend-wasm-threaded-simd.worker.js"); PThread.unusedWorkers.push(new Worker(pthreadMainJs)); @@ -2761,7 +2761,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ var t = performance.now() + msecs; while (performance.now() < t) { } - }}; + } }; function establishStackSpace(stackTop, stackMax) { _emscripten_stack_set_limits(stackTop, stackMax); stackRestore(stackTop); @@ -2807,16 +2807,16 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ } function __emscripten_notify_thread_queue(targetThreadId, mainThreadId) { if (targetThreadId == mainThreadId) { - postMessage({"cmd": "processQueuedMainThreadWork"}); + postMessage({ "cmd": "processQueuedMainThreadWork" }); } else if (ENVIRONMENT_IS_PTHREAD) { - postMessage({"targetThread": targetThreadId, "cmd": "processThreadQueue"}); + postMessage({ "targetThread": targetThreadId, "cmd": "processThreadQueue" }); } else { var pthread = PThread.pthreads[targetThreadId]; var worker = pthread && pthread.worker; if (!worker) { return; } - worker.postMessage({"cmd": "processThreadQueue"}); + worker.postMessage({ "cmd": "processThreadQueue" }); } return 1; } @@ -2945,7 +2945,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ } return false; } - var JSEvents = {inEventHandler: 0, removeAllEventListeners: function() { + var JSEvents = { inEventHandler: 0, removeAllEventListeners: function() { for (var i = JSEvents.eventHandlers.length - 1; i >= 0; --i) { JSEvents._removeHandler(i); } @@ -2972,7 +2972,7 @@ var require_tfjs_backend_wasm_threaded_simd = 
__commonJS({ return; } } - JSEvents.deferredCalls.push({targetFunction, precedence, argsList}); + JSEvents.deferredCalls.push({ targetFunction, precedence, argsList }); JSEvents.deferredCalls.sort(function(x, y) { return x.precedence < y.precedence; }); @@ -3053,7 +3053,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ return target && target.nodeName ? target.nodeName : ""; }, fullscreenEnabled: function() { return document.fullscreenEnabled || document.webkitFullscreenEnabled; - }}; + } }; function stringToNewUTF8(jsString) { var length = lengthBytesUTF8(jsString) + 1; var cString = _malloc(length); @@ -3181,7 +3181,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ function __webgl_enable_WEBGL_multi_draw(ctx) { return !!(ctx.multiDrawWebgl = ctx.getExtension("WEBGL_multi_draw")); } - var GL = {counter: 1, buffers: [], programs: [], framebuffers: [], renderbuffers: [], textures: [], uniforms: [], shaders: [], vaos: [], contexts: {}, offscreenCanvases: {}, timerQueriesEXT: [], programInfos: {}, stringCache: {}, unpackAlignment: 4, recordError: function recordError(errorCode) { + var GL = { counter: 1, buffers: [], programs: [], framebuffers: [], renderbuffers: [], textures: [], uniforms: [], shaders: [], vaos: [], contexts: {}, offscreenCanvases: {}, timerQueriesEXT: [], programInfos: {}, stringCache: {}, unpackAlignment: 4, recordError: function recordError(errorCode) { if (!GL.lastError) { GL.lastError = errorCode; } @@ -3207,7 +3207,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ }, registerContext: function(ctx, webGLContextAttributes) { var handle = _malloc(8); GROWABLE_HEAP_I32()[handle + 4 >> 2] = _pthread_self(); - var context = {handle, attributes: webGLContextAttributes, version: webGLContextAttributes.majorVersion, GLctx: ctx}; + var context = { handle, attributes: webGLContextAttributes, version: webGLContextAttributes.majorVersion, GLctx: ctx }; if (ctx.canvas) ctx.canvas.GLctxObject = context; GL.contexts[handle] = context; @@ -3250,7 +3250,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ }); }, populateUniformTable: function(program) { var p2 = GL.programs[program]; - var ptable = GL.programInfos[program] = {uniforms: {}, maxUniformLength: 0, maxAttributeLength: -1, maxUniformBlockNameLength: -1}; + var ptable = GL.programInfos[program] = { uniforms: {}, maxUniformLength: 0, maxAttributeLength: -1, maxUniformBlockNameLength: -1 }; var utable = ptable.uniforms; var numUniforms = GLctx.getProgramParameter(p2, 35718); for (var i = 0; i < numUniforms; ++i) { @@ -3273,12 +3273,12 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ } } } - }}; + } }; var __emscripten_webgl_power_preferences = ["default", "low-power", "high-performance"]; function _emscripten_webgl_do_create_context(target, attributes) { var a = attributes >> 2; var powerPreference = GROWABLE_HEAP_I32()[a + (24 >> 2)]; - var contextAttributes = {"alpha": !!GROWABLE_HEAP_I32()[a + (0 >> 2)], "depth": !!GROWABLE_HEAP_I32()[a + (4 >> 2)], "stencil": !!GROWABLE_HEAP_I32()[a + (8 >> 2)], "antialias": !!GROWABLE_HEAP_I32()[a + (12 >> 2)], "premultipliedAlpha": !!GROWABLE_HEAP_I32()[a + (16 >> 2)], "preserveDrawingBuffer": !!GROWABLE_HEAP_I32()[a + (20 >> 2)], "powerPreference": __emscripten_webgl_power_preferences[powerPreference], "failIfMajorPerformanceCaveat": !!GROWABLE_HEAP_I32()[a + (28 >> 2)], majorVersion: GROWABLE_HEAP_I32()[a + (32 >> 2)], minorVersion: GROWABLE_HEAP_I32()[a + (36 >> 2)], enableExtensionsByDefault: GROWABLE_HEAP_I32()[a + (40 
>> 2)], explicitSwapControl: GROWABLE_HEAP_I32()[a + (44 >> 2)], proxyContextToMainThread: GROWABLE_HEAP_I32()[a + (48 >> 2)], renderViaOffscreenBackBuffer: GROWABLE_HEAP_I32()[a + (52 >> 2)]}; + var contextAttributes = { "alpha": !!GROWABLE_HEAP_I32()[a + (0 >> 2)], "depth": !!GROWABLE_HEAP_I32()[a + (4 >> 2)], "stencil": !!GROWABLE_HEAP_I32()[a + (8 >> 2)], "antialias": !!GROWABLE_HEAP_I32()[a + (12 >> 2)], "premultipliedAlpha": !!GROWABLE_HEAP_I32()[a + (16 >> 2)], "preserveDrawingBuffer": !!GROWABLE_HEAP_I32()[a + (20 >> 2)], "powerPreference": __emscripten_webgl_power_preferences[powerPreference], "failIfMajorPerformanceCaveat": !!GROWABLE_HEAP_I32()[a + (28 >> 2)], majorVersion: GROWABLE_HEAP_I32()[a + (32 >> 2)], minorVersion: GROWABLE_HEAP_I32()[a + (36 >> 2)], enableExtensionsByDefault: GROWABLE_HEAP_I32()[a + (40 >> 2)], explicitSwapControl: GROWABLE_HEAP_I32()[a + (44 >> 2)], proxyContextToMainThread: GROWABLE_HEAP_I32()[a + (48 >> 2)], renderViaOffscreenBackBuffer: GROWABLE_HEAP_I32()[a + (52 >> 2)] }; var canvas = findCanvasEventTarget(target); if (!canvas) { return 0; @@ -3292,7 +3292,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ function _emscripten_webgl_create_context(a0, a12) { return _emscripten_webgl_do_create_context(a0, a12); } - var SYSCALLS = {mappings: {}, buffers: [null, [], []], printChar: function(stream, curr) { + var SYSCALLS = { mappings: {}, buffers: [null, [], []], printChar: function(stream, curr) { var buffer3 = SYSCALLS.buffers[stream]; if (curr === 0 || curr === 10) { (stream === 1 ? out : err)(UTF8ArrayToString(buffer3, 0)); @@ -3309,7 +3309,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ return ret; }, get64: function(low, high) { return low; - }}; + } }; function _fd_close(fd) { if (ENVIRONMENT_IS_PTHREAD) return _emscripten_proxy_to_main_thread_js(3, 1, fd); @@ -3358,7 +3358,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ GROWABLE_HEAP_I32()[tlsMemory + i * 4 >> 2] = 0; } var stackHigh = threadParams.stackBase + threadParams.stackSize; - var pthread = PThread.pthreads[threadParams.pthread_ptr] = {worker, stackBase: threadParams.stackBase, stackSize: threadParams.stackSize, allocatedOwnStack: threadParams.allocatedOwnStack, threadInfoStruct: threadParams.pthread_ptr}; + var pthread = PThread.pthreads[threadParams.pthread_ptr] = { worker, stackBase: threadParams.stackBase, stackSize: threadParams.stackSize, allocatedOwnStack: threadParams.allocatedOwnStack, threadInfoStruct: threadParams.pthread_ptr }; var tis = pthread.threadInfoStruct >> 2; Atomics.store(GROWABLE_HEAP_U32(), tis + (64 >> 2), threadParams.detached); Atomics.store(GROWABLE_HEAP_U32(), tis + (100 >> 2), tlsMemory); @@ -3372,7 +3372,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ var global_locale = global_libc + 40; Atomics.store(GROWABLE_HEAP_U32(), tis + (172 >> 2), global_locale); worker.pthread = pthread; - var msg = {"cmd": "run", "start_routine": threadParams.startRoutine, "arg": threadParams.arg, "threadInfoStruct": threadParams.pthread_ptr, "stackBase": threadParams.stackBase, "stackSize": threadParams.stackSize}; + var msg = { "cmd": "run", "start_routine": threadParams.startRoutine, "arg": threadParams.arg, "threadInfoStruct": threadParams.pthread_ptr, "stackBase": threadParams.stackBase, "stackSize": threadParams.stackSize }; worker.runPthread = function() { msg.time = performance.now(); worker.postMessage(msg, threadParams.transferList); @@ -3423,7 +3423,7 @@ var require_tfjs_backend_wasm_threaded_simd = 
__commonJS({ GROWABLE_HEAP_I32()[threadInfoStruct + 12 >> 2] = threadInfoStruct; var headPtr = threadInfoStruct + 152; GROWABLE_HEAP_I32()[headPtr >> 2] = headPtr; - var threadParams = {stackBase, stackSize, allocatedOwnStack, detached, startRoutine: start_routine, pthread_ptr: threadInfoStruct, arg, transferList}; + var threadParams = { stackBase, stackSize, allocatedOwnStack, detached, startRoutine: start_routine, pthread_ptr: threadInfoStruct, arg, transferList }; if (ENVIRONMENT_IS_PTHREAD) { threadParams.cmd = "spawnThread"; postMessage(threadParams, transferList); @@ -3598,7 +3598,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ PThread.initMainThreadBlock(); var GLctx; var proxiedFunctionTable = [null, _atexit, _emscripten_set_canvas_element_size_main_thread, _fd_close, _fd_seek, _fd_write, _sysconf]; - var asmLibraryArg = {"e": ___assert_fail, "r": ___call_main, "x": __emscripten_notify_thread_queue, "b": _abort, "y": _emscripten_asm_const_int, "j": _emscripten_conditional_set_current_thread_status, "c": _emscripten_futex_wait, "d": _emscripten_futex_wake, "f": _emscripten_get_now, "p": _emscripten_memcpy_big, "z": _emscripten_num_logical_cores, "u": _emscripten_receive_on_main_thread_js, "q": _emscripten_resize_heap, "v": _emscripten_set_canvas_element_size, "i": _emscripten_set_current_thread_status, "t": _emscripten_set_thread_name, "w": _emscripten_webgl_create_context, "m": _fd_close, "n": _fd_seek, "g": _fd_write, "o": initPthreadsJS, "a": wasmMemory || Module["wasmMemory"], "k": _pthread_cleanup_pop, "l": _pthread_cleanup_push, "h": _pthread_create, "s": _sysconf}; + var asmLibraryArg = { "e": ___assert_fail, "r": ___call_main, "x": __emscripten_notify_thread_queue, "b": _abort, "y": _emscripten_asm_const_int, "j": _emscripten_conditional_set_current_thread_status, "c": _emscripten_futex_wait, "d": _emscripten_futex_wake, "f": _emscripten_get_now, "p": _emscripten_memcpy_big, "z": _emscripten_num_logical_cores, "u": _emscripten_receive_on_main_thread_js, "q": _emscripten_resize_heap, "v": _emscripten_set_canvas_element_size, "i": _emscripten_set_current_thread_status, "t": _emscripten_set_thread_name, "w": _emscripten_webgl_create_context, "m": _fd_close, "n": _fd_seek, "g": _fd_write, "o": initPthreadsJS, "a": wasmMemory || Module["wasmMemory"], "k": _pthread_cleanup_pop, "l": _pthread_cleanup_push, "h": _pthread_create, "s": _sysconf }; var asm = createWasm(); var ___wasm_call_ctors = Module["___wasm_call_ctors"] = function() { return (___wasm_call_ctors = Module["___wasm_call_ctors"] = Module["asm"]["A"]).apply(null, arguments); @@ -3939,7 +3939,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ if (ENVIRONMENT_IS_PTHREAD) { readyPromiseResolve(Module); initRuntime(); - postMessage({"cmd": "loaded"}); + postMessage({ "cmd": "loaded" }); return; } preRun(); @@ -3979,7 +3979,7 @@ var require_tfjs_backend_wasm_threaded_simd = __commonJS({ } if (!implicit) { if (ENVIRONMENT_IS_PTHREAD) { - postMessage({"cmd": "exitProcess", "returnCode": status}); + postMessage({ "cmd": "exitProcess", "returnCode": status }); throw new ExitStatus(status); } else { } @@ -4218,7 +4218,7 @@ var require_tfjs_backend_wasm = __commonJS({ return func2; } function ccall(ident, returnType, argTypes, args, opts) { - var toC = {"string": function(str) { + var toC = { "string": function(str) { var ret2 = 0; if (str !== null && str !== void 0 && str !== 0) { var len = (str.length << 2) + 1; @@ -4230,7 +4230,7 @@ var require_tfjs_backend_wasm = __commonJS({ var ret2 = 
stackAlloc(arr.length); writeArrayToMemory(arr, ret2); return ret2; - }}; + } }; function convertReturnValue(ret2) { if (returnType === "string") return UTF8ToString(ret2); @@ -4381,9 +4381,9 @@ var require_tfjs_backend_wasm = __commonJS({ var __ATMAIN__ = []; var __ATPOSTRUN__ = []; var runtimeInitialized = false; - __ATINIT__.push({func: function() { + __ATINIT__.push({ func: function() { ___wasm_call_ctors(); - }}); + } }); function preRun() { if (Module["preRun"]) { if (typeof Module["preRun"] == "function") @@ -4490,7 +4490,7 @@ var require_tfjs_backend_wasm = __commonJS({ function getBinaryPromise() { if (!wasmBinary && (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER)) { if (typeof fetch === "function" && !isFileURI(wasmBinaryFile)) { - return fetch(wasmBinaryFile, {credentials: "same-origin"}).then(function(response) { + return fetch(wasmBinaryFile, { credentials: "same-origin" }).then(function(response) { if (!response["ok"]) { throw "failed to load wasm binary file at '" + wasmBinaryFile + "'"; } @@ -4513,7 +4513,7 @@ var require_tfjs_backend_wasm = __commonJS({ }); } function createWasm() { - var info = {"a": asmLibraryArg}; + var info = { "a": asmLibraryArg }; function receiveInstance(instance, module22) { var exports3 = instance.exports; Module["asm"] = exports3; @@ -4536,7 +4536,7 @@ var require_tfjs_backend_wasm = __commonJS({ } function instantiateAsync() { if (!wasmBinary && typeof WebAssembly.instantiateStreaming === "function" && !isDataURI(wasmBinaryFile) && !isFileURI(wasmBinaryFile) && typeof fetch === "function") { - return fetch(wasmBinaryFile, {credentials: "same-origin"}).then(function(response) { + return fetch(wasmBinaryFile, { credentials: "same-origin" }).then(function(response) { var result = WebAssembly.instantiateStreaming(response, info); return result.then(receiveInstantiatedSource, function(reason) { err("wasm streaming compile failed: " + reason); @@ -4613,7 +4613,7 @@ var require_tfjs_backend_wasm = __commonJS({ } return false; } - var SYSCALLS = {mappings: {}, buffers: [null, [], []], printChar: function(stream, curr) { + var SYSCALLS = { mappings: {}, buffers: [null, [], []], printChar: function(stream, curr) { var buffer3 = SYSCALLS.buffers[stream]; if (curr === 0 || curr === 10) { (stream === 1 ? 
out : err)(UTF8ArrayToString(buffer3, 0)); @@ -4630,7 +4630,7 @@ var require_tfjs_backend_wasm = __commonJS({ return ret; }, get64: function(low, high) { return low; - }}; + } }; function _fd_close(fd) { return 0; } @@ -4816,7 +4816,7 @@ var require_tfjs_backend_wasm = __commonJS({ setErrNo(28); return -1; } - var asmLibraryArg = {"a": _abort, "d": _emscripten_memcpy_big, "e": _emscripten_resize_heap, "f": _fd_close, "c": _fd_seek, "b": _fd_write, "g": _pthread_create, "h": _sysconf}; + var asmLibraryArg = { "a": _abort, "d": _emscripten_memcpy_big, "e": _emscripten_resize_heap, "f": _fd_close, "c": _fd_seek, "b": _fd_write, "g": _pthread_create, "h": _sysconf }; var asm = createWasm(); var ___wasm_call_ctors = Module["___wasm_call_ctors"] = function() { return (___wasm_call_ctors = Module["___wasm_call_ctors"] = Module["asm"]["j"]).apply(null, arguments); @@ -5459,7 +5459,7 @@ function squeezeShape(shape, axis) { keptDims.push(i); } } - return {newShape, keptDims}; + return { newShape, keptDims }; } function getTypedArrayFromDType(dtype, size) { let values = null; @@ -5698,7 +5698,7 @@ var Environment = class { this.platform = platform; } registerFlag(flagName, evaluationFn, setHook) { - this.flagRegistry[flagName] = {evaluationFn, setHook}; + this.flagRegistry[flagName] = { evaluationFn, setHook }; if (this.urlFlags[flagName] != null) { const flagValue = this.urlFlags[flagName]; console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`); @@ -6009,7 +6009,7 @@ function getKernelsForBackend(backendName) { const it = kernelRegistry.entries(); const result = []; while (true) { - const {done, value} = it.next(); + const { done, value } = it.next(); if (done) { break; } @@ -6022,7 +6022,7 @@ function getKernelsForBackend(backendName) { return result; } function registerKernel(config) { - const {kernelName, backendName} = config; + const { kernelName, backendName } = config; const key = makeKey(kernelName, backendName); if (kernelRegistry.has(key)) { console.warn(`The kernel '${kernelName}' for backend '${backendName}' is already registered`); @@ -6030,7 +6030,7 @@ function registerKernel(config) { kernelRegistry.set(key, config); } function registerGradient(config) { - const {kernelName} = config; + const { kernelName } = config; if (gradRegistry.has(kernelName)) { if (env().getBool("DEBUG")) { console.warn(`Overriding the gradient for '${kernelName}'`); @@ -6054,7 +6054,7 @@ function unregisterGradient(kernelName) { function copyRegisteredKernels(registeredBackendName, newBackendName) { const kernels = getKernelsForBackend(registeredBackendName); kernels.forEach((kernelConfig) => { - const newKernelConfig = Object.assign({}, kernelConfig, {backendName: newBackendName}); + const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName }); registerKernel(newKernelConfig); }); } @@ -6190,7 +6190,7 @@ var Profiler = class { for (const output of outputs) { output.dataSync(); } - timer = Promise.resolve({kernelMs: now() - start}); + timer = Promise.resolve({ kernelMs: now() - start }); } if (env().getBool("CHECK_COMPUTATION_FOR_ERRORS")) { for (let i = 0; i < outputs.length; i++) { @@ -6210,7 +6210,7 @@ var Profiler = class { return kernelProfile; } logKernelProfile(kernelProfile) { - const {kernelName, outputs, timeMs, inputs, extraInfo} = kernelProfile; + const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile; outputs.forEach((result) => { Promise.all([result.data(), timeMs, extraInfo]).then((valueContainer) => { 
this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]); @@ -6866,7 +6866,7 @@ var Engine = class { throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`); } if (this.backendInstance == null) { - const {name, asyncInit} = this.initializeBackendsAndReturnBest(); + const { name, asyncInit } = this.initializeBackendsAndReturnBest(); if (asyncInit) { throw new Error(`The highest priority backend '${name}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`); } @@ -6880,7 +6880,7 @@ var Engine = class { findBackend(backendName) { if (!(backendName in this.registry)) { if (backendName in this.registryFactory) { - const {asyncInit} = this.initializeBackend(backendName); + const { asyncInit } = this.initializeBackend(backendName); if (asyncInit) { return null; } @@ -6901,7 +6901,7 @@ var Engine = class { console.warn(`${backendName} backend was already registered. Reusing existing backend factory.`); return false; } - this.registryFactory[backendName] = {factory, priority}; + this.registryFactory[backendName] = { factory, priority }; return true; } async setBackend(backendName) { @@ -6911,7 +6911,7 @@ var Engine = class { this.backendName = backendName; if (this.registry[backendName] == null) { this.backendInstance = null; - const {success, asyncInit} = this.initializeBackend(backendName); + const { success, asyncInit } = this.initializeBackend(backendName); const result = asyncInit ? await success : success; if (!result) { return false; @@ -6964,15 +6964,15 @@ var Engine = class { return false; }); this.pendingBackendInit = success; - return {success, asyncInit: true}; + return { success, asyncInit: true }; } else { this.registry[backendName] = backend2; - return {success: true, asyncInit: false}; + return { success: true, asyncInit: false }; } } catch (err) { console.warn(`Initialization of backend ${backendName} failed`); console.warn(err.stack || err.message); - return {success: false, asyncInit: false}; + return { success: false, asyncInit: false }; } } removeBackend(backendName) { @@ -7006,9 +7006,9 @@ var Engine = class { const sortedBackends = this.getSortedBackends(); for (let i = 0; i < sortedBackends.length; i++) { const backendName = sortedBackends[i]; - const {success, asyncInit} = this.initializeBackend(backendName); + const { success, asyncInit } = this.initializeBackend(backendName); if (asyncInit || success) { - return {name: backendName, asyncInit}; + return { name: backendName, asyncInit }; } } throw new Error(`Could not initialize any backends, all backend initializations failed.`); @@ -7068,13 +7068,13 @@ var Engine = class { return Engine.nextVariableId++; } clone(x) { - const y = ENGINE.runKernel(Identity, {x}); - const inputs = {x}; + const y = ENGINE.runKernel(Identity, { x }); + const inputs = { x }; const grad2 = (dy) => ({ x: () => { const dtype = "float32"; - const gradInputs = {x: dy}; - const attrs = {dtype}; + const gradInputs = { x: dy }; + const attrs = { dtype }; return ENGINE.runKernel(Cast, gradInputs, attrs); } }); @@ -7087,7 +7087,7 @@ var Engine = class { if (!hasKernel) { throw new Error(`Kernel '${kernelName}' not registered for backend '${this.backendName}'`); } - return this.runKernelFunc({kernelName, inputs, attrs}); + return this.runKernelFunc({ kernelName, inputs, attrs }); } shouldCheckForMemLeaks() { return 
this.ENV.getBool("IS_TEST"); @@ -7120,7 +7120,7 @@ var Engine = class { let out; const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ? kernelParams.kernelName : this.state.activeScope != null ? this.state.activeScope.name : ""; if (isRegisteredKernelInvocation(kernelParams)) { - const {kernelName, inputs: inputs2, attrs: attrs2} = kernelParams; + const { kernelName, inputs: inputs2, attrs: attrs2 } = kernelParams; if (this.backendName == null) { this.backend; } @@ -7128,7 +7128,7 @@ var Engine = class { assert(kernel != null, () => `Cannot find registered kernel '${kernelName}' for backend '${this.backendName}'`); kernelFunc3 = () => { const numDataIdsBefore = this.backend.numDataIds(); - out = kernel.kernelFunc({inputs: inputs2, attrs: attrs2, backend: this.backend}); + out = kernel.kernelFunc({ inputs: inputs2, attrs: attrs2, backend: this.backend }); const outInfos = Array.isArray(out) ? out : [out]; if (this.shouldCheckForMemLeaks()) { this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos); @@ -7137,7 +7137,7 @@ var Engine = class { if (outInfo.rank != null) { return outInfo; } - const {dataId, shape, dtype} = outInfo; + const { dataId, shape, dtype } = outInfo; return this.makeTensorFromDataId(dataId, shape, dtype); }); if (isTapeOn) { @@ -7147,7 +7147,7 @@ var Engine = class { return outTensors; }; } else { - const {forwardFunc} = kernelParams; + const { forwardFunc } = kernelParams; const saveFunc = (tensors) => { if (!isTapeOn) { return; @@ -7164,7 +7164,7 @@ var Engine = class { return outs; }; } - const {inputs, attrs} = kernelParams; + const { inputs, attrs } = kernelParams; const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ? null : kernelParams.backwardsFunc; let kernelProfile; this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => { @@ -7354,7 +7354,7 @@ var Engine = class { return this.state.gradientDepth > 0 && this.state.kernelDepth === 0; } addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) { - const tapeNode = {id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved}; + const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved }; const gradConfig = getGradient(kernelName); if (gradConfig != null) { gradientsFunc = gradConfig.gradFunc; @@ -7440,7 +7440,7 @@ var Engine = class { }); this.state.activeTape = null; } - return {value: y, grads: grads2}; + return { value: y, grads: grads2 }; }); } customGrad(f) { @@ -7533,7 +7533,7 @@ function getOrMakeEngine() { } var ENGINE = getOrMakeEngine(); function add(a, b) { - const inputs = {a, b}; + const inputs = { a, b }; return ENGINE.runKernel(Add, inputs); } var device_util_exports = {}; @@ -7672,17 +7672,17 @@ function op(f) { throw ex; } }; - Object.defineProperty(f2, "name", {value: opName, configurable: true}); + Object.defineProperty(f2, "name", { value: opName, configurable: true }); return f2; } function complex_(real4, imag4) { const $real = convertToTensor(real4, "real", "complex"); const $imag = convertToTensor(imag4, "imag", "complex"); assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, must match in call to tf.complex().`); - const inputs = {real: $real, imag: $imag}; + const inputs = { real: $real, imag: $imag }; return ENGINE.runKernel(Complex, inputs); } -var complex = op({complex_}); +var complex = op({ complex_ }); function makeTensor(values, shape, inferredShape, dtype) { if (dtype == null) { dtype = inferDtype(values); @@ -7735,7 
+7735,7 @@ async function encodeWeights(tensors, group) { if (t.dtype !== "float32" && t.dtype !== "int32" && t.dtype !== "bool" && t.dtype !== "string" && t.dtype !== "complex64") { throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`); } - const spec = {name, shape: t.shape, dtype: t.dtype}; + const spec = { name, shape: t.shape, dtype: t.dtype }; if (t.dtype === "string") { const utf8bytes = new Promise(async (resolve) => { const vals = await t.bytes(); @@ -7762,7 +7762,7 @@ async function encodeWeights(tensors, group) { specs.push(spec); } const tensorValues = await Promise.all(dataPromises); - return {data: concatenateTypedArrays(tensorValues), specs}; + return { data: concatenateTypedArrays(tensorValues), specs }; } function decodeWeights(buffer2, specs) { const out = {}; @@ -8064,8 +8064,8 @@ function getIndexedDBFactory() { } function setUpDatabase(openRequest) { const db = openRequest.result; - db.createObjectStore(MODEL_STORE_NAME, {keyPath: "modelPath"}); - db.createObjectStore(INFO_STORE_NAME, {keyPath: "modelPath"}); + db.createObjectStore(MODEL_STORE_NAME, { keyPath: "modelPath" }); + db.createObjectStore(INFO_STORE_NAME, { keyPath: "modelPath" }); } var BrowserIndexedDB = class { constructor(modelPath) { @@ -8111,7 +8111,7 @@ var BrowserIndexedDB = class { const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts); const infoTx = db.transaction(INFO_STORE_NAME, "readwrite"); let infoStore = infoTx.objectStore(INFO_STORE_NAME); - const putInfoRequest = infoStore.put({modelPath: this.modelPath, modelArtifactsInfo}); + const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo }); let modelTx; putInfoRequest.onsuccess = () => { modelTx = db.transaction(MODEL_STORE_NAME, "readwrite"); @@ -8121,7 +8121,7 @@ var BrowserIndexedDB = class { modelArtifacts, modelArtifactsInfo }); - putModelRequest.onsuccess = () => resolve({modelArtifactsInfo}); + putModelRequest.onsuccess = () => resolve({ modelArtifactsInfo }); putModelRequest.onerror = (error) => { infoStore = infoTx.objectStore(INFO_STORE_NAME); const deleteInfoRequest = infoStore.delete(this.modelPath); @@ -8314,7 +8314,7 @@ var BrowserLocalStorage = class { result.modelInitializer = modelArtifacts.modelInitializer; } this.LS.setItem(this.keys.modelMetadata, JSON.stringify(result)); - return {modelArtifactsInfo}; + return { modelArtifactsInfo }; } catch (err) { this.LS.removeItem(this.keys.info); this.LS.removeItem(this.keys.topology); @@ -8589,17 +8589,17 @@ function cast_(x, dtype) { if (dtype === "string" && $x.dtype !== "string" || dtype !== "string" && $x.dtype === "string") { throw new Error("Only strings can be casted to strings"); } - const inputs = {x: $x}; - const attrs = {dtype}; + const inputs = { x: $x }; + const attrs = { dtype }; return ENGINE.runKernel(Cast, inputs, attrs); } -var cast = op({cast_}); +var cast = op({ cast_ }); function clone_(x) { const $x = convertToTensor(x, "x", "clone", "string_or_numeric"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Identity, inputs); } -var clone = op({clone_}); +var clone = op({ clone_ }); function print2(x, verbose = false) { console.log(x.toString(verbose)); } @@ -8658,7 +8658,7 @@ var BrowserDownloads = class { if (typeof document === "undefined") { throw new Error("Browser downloads are not supported in this environment since `document` is not present"); } - const weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], {type: "application/octet-stream"})); + 
const weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: "application/octet-stream" })); if (modelArtifacts.modelTopology instanceof ArrayBuffer) { throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet."); } else { @@ -8682,7 +8682,7 @@ var BrowserDownloads = class { if (modelArtifacts.modelInitializer != null) { modelTopologyAndWeightManifest.modelInitializer = modelArtifacts.modelInitializer; } - const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {type: "application/json"})); + const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: "application/json" })); const jsonAnchor = this.jsonAnchor == null ? document.createElement("a") : this.jsonAnchor; jsonAnchor.download = this.modelTopologyFileName; jsonAnchor.href = modelTopologyAndWeightManifestURL; @@ -8693,7 +8693,7 @@ var BrowserDownloads = class { weightDataAnchor.href = weightsURL; await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent("click"))); } - return {modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts)}; + return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) }; } } }; @@ -8718,7 +8718,7 @@ var BrowserFiles = class { return; } if (weightFiles.length === 0) { - resolve({modelTopology}); + resolve({ modelTopology }); } const weightsManifest = modelJSON.weightsManifest; if (weightsManifest == null) { @@ -8850,7 +8850,7 @@ async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) { loadOptions = {}; } const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch : loadOptions.fetchFunc; - const requests = fetchURLs.map((fetchURL) => fetchFunc(fetchURL, loadOptions.requestInit, {isBinary: true})); + const requests = fetchURLs.map((fetchURL) => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true })); const fetchStartFraction = 0; const fetchEndFraction = 0.5; const responses = loadOptions.onProgress == null ? 
await Promise.all(requests) : await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction); @@ -8861,7 +8861,7 @@ async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) { return buffers; } async function loadWeights(manifest, filePathPrefix = "", weightNames, requestInit) { - const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, {requestInit}); + const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit }); const loadWeights2 = weightsLoaderFactory(fetchWeights); return loadWeights2(manifest, filePathPrefix, weightNames); } @@ -8980,7 +8980,7 @@ var HTTPRequest = class { if (modelArtifacts.modelTopology instanceof ArrayBuffer) { throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet."); } - const init2 = Object.assign({method: this.DEFAULT_METHOD}, this.requestInit); + const init2 = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit); init2.body = new FormData(); const weightsManifest = [{ paths: ["./model.weights.bin"], @@ -9002,9 +9002,9 @@ var HTTPRequest = class { if (modelArtifacts.modelInitializer != null) { modelTopologyAndWeightManifest.modelInitializer = modelArtifacts.modelInitializer; } - init2.body.append("model.json", new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {type: JSON_TYPE}), "model.json"); + init2.body.append("model.json", new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), "model.json"); if (modelArtifacts.weightData != null) { - init2.body.append("model.weights.bin", new Blob([modelArtifacts.weightData], {type: OCTET_STREAM_MIME_TYPE}), "model.weights.bin"); + init2.body.append("model.weights.bin", new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), "model.weights.bin"); } const response = await this.fetch(this.path, init2); if (response.ok) { @@ -9157,7 +9157,7 @@ function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) { return new PassthroughLoader(modelArtifacts); } else { console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."); - return new PassthroughLoader({modelTopology: modelArtifacts}); + return new PassthroughLoader({ modelTopology: modelArtifacts }); } } else { console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."); @@ -9180,21 +9180,21 @@ function matMul_(a, b, transposeA = false, transposeB = false) { let $a = convertToTensor(a, "a", "matMul"); let $b = convertToTensor(b, "b", "matMul"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; - const attrs = {transposeA, transposeB}; + const inputs = { a: $a, b: $b }; + const attrs = { transposeA, transposeB }; return ENGINE.runKernel(BatchMatMul, inputs, attrs); } -var matMul = op({matMul_}); +var matMul = op({ matMul_ }); function oneHot_(indices, depth, onValue = 1, offValue = 0) { if (depth < 2) { throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`); } const $indices = convertToTensor(indices, "indices", "oneHot", "int32"); - const inputs = {indices: $indices}; - const attrs = {depth, onValue, offValue}; + const inputs = { indices: $indices }; + const attrs = { depth, onValue, offValue }; return ENGINE.runKernel(OneHot, inputs, attrs); } -var oneHot = op({oneHot_}); +var oneHot = op({ oneHot_ }); function transpose_(x, perm) { const $x = convertToTensor(x, "x", "transpose"); if (perm == null) { @@ -9207,11 +9207,11 @@ function transpose_(x, perm) { if ($x.rank <= 1) { return $x.clone(); } - const inputs = {x: $x}; - const attrs = {perm}; + const inputs = { x: $x }; + const attrs = { perm }; return ENGINE.runKernel(Transpose, inputs, attrs); } -var transpose = op({transpose_}); +var transpose = op({ transpose_ }); function confusionMatrix_(labels, predictions, numClasses) { const $labels = convertToTensor(labels, "labels", "confusionMatrix"); const $predictions = convertToTensor(predictions, "predictions", "confusionMatrix"); @@ -9226,7 +9226,7 @@ function confusionMatrix_(labels, predictions, numClasses) { const product = matMul(oneHotLabelsT, oneHotPredictions); return cast(product, "int32"); } -var confusionMatrix = op({confusionMatrix_}); +var confusionMatrix = op({ confusionMatrix_ }); var browser_exports = {}; __export2(browser_exports, { fromPixels: () => fromPixels, @@ -9284,8 +9284,8 @@ function fromPixels_(pixels, numChannels = 3) { } const kernel = getKernel(FromPixels, ENGINE.backendName); if (kernel != null) { - const inputs = {pixels}; - const attrs = {numChannels}; + const inputs = { pixels }; + const attrs = { numChannels }; return ENGINE.runKernel(FromPixels, inputs, attrs); } const [width, height] = isVideo ? 
[ @@ -9338,7 +9338,7 @@ async function fromPixelsAsync(pixels, numChannels = 3) { if (env().getBool("WRAP_TO_IMAGEBITMAP") && canWrapPixelsToImageBitmap(pixels)) { let imageBitmap; try { - imageBitmap = await createImageBitmap(pixels, {premultiplyAlpha: "none"}); + imageBitmap = await createImageBitmap(pixels, { premultiplyAlpha: "none" }); } catch (e) { imageBitmap = null; } @@ -9412,7 +9412,7 @@ async function toPixels(img, canvas) { } return bytes; } -var fromPixels = op({fromPixels_}); +var fromPixels = op({ fromPixels_ }); var gather_nd_util_exports = {}; __export2(gather_nd_util_exports, { prepareAndValidate: () => prepareAndValidate @@ -9520,7 +9520,7 @@ function calculateShapes(updates, indices, shape) { const numUpdates = sizeFromShape(indices.shape) / safeSliceDim; const strides = [...computeStrides(shape.slice(0, sliceRank)), 1]; const outputSize = sizeFromShape(shape); - return {sliceRank, numUpdates, sliceSize, strides, outputSize}; + return { sliceRank, numUpdates, sliceSize, strides, outputSize }; } var slice_util_exports = {}; __export2(slice_util_exports, { @@ -9782,7 +9782,7 @@ function sliceInfo(xShape, begin, end, strides, beginMask, endMask, ellipsisMask $end[axis] = 1; newShape.splice(axis, 0, 1); }); - const {begin: normalizedBegin, end: normalizedEnd, strides: normalizedStrides} = getNormalizedAxes(newShape, ellipsisAxes, numInterpolatedAxes, $begin, $end, $strides, beginMask, endMask, ellipsisMask); + const { begin: normalizedBegin, end: normalizedEnd, strides: normalizedStrides } = getNormalizedAxes(newShape, ellipsisAxes, numInterpolatedAxes, $begin, $end, $strides, beginMask, endMask, ellipsisMask); $begin = normalizedBegin; $end = normalizedEnd; $strides = normalizedStrides; @@ -9794,7 +9794,7 @@ function sliceInfo(xShape, begin, end, strides, beginMask, endMask, ellipsisMask const size = computeOutShape($begin, $end, $strides); const outShape = size.filter((_, axis) => shrinkAxes.indexOf(axis) === -1); const nonStrided = $strides.every((v) => v === 1); - return {nonStrided, $begin, $end, $strides, size, newShape, outShape}; + return { nonStrided, $begin, $end, $strides, size, newShape, outShape }; } var serialization_exports = {}; __export2(serialization_exports, { @@ -10013,18 +10013,18 @@ function add_(a, b) { let $a = convertToTensor(a, "a", "add"); let $b = convertToTensor(b, "b", "add"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Add, inputs); } -var add2 = op({add_}); +var add2 = op({ add_ }); function floorDiv_(a, b) { let $a = convertToTensor(a, "a", "floorDiv"); let $b = convertToTensor(b, "b", "floorDiv"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(FloorDiv, inputs); } -var floorDiv = op({floorDiv_}); +var floorDiv = op({ floorDiv_ }); function div_(a, b) { let $a = convertToTensor(a, "a", "div"); let $b = convertToTensor(b, "b", "div"); @@ -10032,42 +10032,42 @@ function div_(a, b) { if ($a.dtype === "int32" && $b.dtype === "int32") { return floorDiv($a, $b); } - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; const attrs = {}; return ENGINE.runKernel(RealDiv, inputs, attrs); } -var div = op({div_}); +var div = op({ div_ }); function mul_(a, b) { let $a = convertToTensor(a, "a", "mul"); let $b = convertToTensor(b, "b", "mul"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Multiply, 
inputs); } -var mul = op({mul_}); +var mul = op({ mul_ }); function abs_(x) { const $x = convertToTensor(x, "x", "abs"); if ($x.dtype === "complex64") { - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(ComplexAbs, inputs); } else { - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Abs, inputs); } } -var abs = op({abs_}); +var abs = op({ abs_ }); function acos_(x) { const $x = convertToTensor(x, "x", "acos"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Acos, inputs); } -var acos = op({acos_}); +var acos = op({ acos_ }); function acosh_(x) { const $x = convertToTensor(x, "x", "acosh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Acosh, inputs); } -var acosh = op({acosh_}); +var acosh = op({ acosh_ }); function addN_(tensors) { assert(Array.isArray(tensors), () => "The argument passed to tf.addN() must be a list of tensors"); assert(tensors.length >= 1, () => `Must pass at least one tensor to tf.addN(), but got ${tensors.length}`); @@ -10086,67 +10086,67 @@ function addN_(tensors) { const inputs = $tensors; return ENGINE.runKernel(AddN, inputs); } -var addN = op({addN_}); +var addN = op({ addN_ }); function all_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "all", "bool"); - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(All, inputs, attrs); } -var all = op({all_}); +var all = op({ all_ }); function any_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "any", "bool"); - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(Any, inputs, attrs); } -var any = op({any_}); +var any = op({ any_ }); function argMax_(x, axis = 0) { const $x = convertToTensor(x, "x", "argMax"); - const inputs = {x: $x}; - const attrs = {axis}; + const inputs = { x: $x }; + const attrs = { axis }; return ENGINE.runKernel(ArgMax, inputs, attrs); } -var argMax = op({argMax_}); +var argMax = op({ argMax_ }); function argMin_(x, axis = 0) { const $x = convertToTensor(x, "x", "argMin"); - const inputs = {x: $x}; - const attrs = {axis}; + const inputs = { x: $x }; + const attrs = { axis }; return ENGINE.runKernel(ArgMin, inputs, attrs); } -var argMin = op({argMin_}); +var argMin = op({ argMin_ }); function asin_(x) { const $x = convertToTensor(x, "x", "asin"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Asin, inputs); } -var asin = op({asin_}); +var asin = op({ asin_ }); function asinh_(x) { const $x = convertToTensor(x, "x", "asinh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Asinh, inputs); } -var asinh = op({asinh_}); +var asinh = op({ asinh_ }); function atan_(x) { const $x = convertToTensor(x, "x", "atan"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Atan, inputs); } -var atan = op({atan_}); +var atan = op({ atan_ }); function atan2_(a, b) { let $a = convertToTensor(a, "a", "atan2"); let $b = convertToTensor(b, "b", "atan2"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Atan2, inputs); } -var atan2 = op({atan2_}); +var atan2 = op({ atan2_ }); function atanh_(x) { const $x = convertToTensor(x, "x", "atanh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return 
ENGINE.runKernel(Atanh, inputs); } -var atanh = op({atanh_}); +var atanh = op({ atanh_ }); function computeDilation2DInfo(inputShape, filterShape, strides, pad3, dataFormat = "NHWC", dilations) { const inputChannels = inputShape[3]; const $filterShape = [...filterShape, inputChannels]; @@ -10194,7 +10194,7 @@ function computeConv2DInfo(inShape, filterShape, strides, dilations, pad3, round const [dilationHeight, dilationWidth] = parseTupleParam(dilations); const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); - const {padInfo, outHeight, outWidth} = getPadAndOutInfo(pad3, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat); + const { padInfo, outHeight, outWidth } = getPadAndOutInfo(pad3, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat); const outChannels = depthwise ? filterChannels * inChannels : filterChannels; let outShape; if (dataFormat === "channelsFirst") { @@ -10240,7 +10240,7 @@ function computeConv3DInfo(inShape, filterShape, strides, dilations, pad3, depth const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth); const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); - const {padInfo, outDepth, outHeight, outWidth} = get3DPadAndOutInfo(pad3, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode); + const { padInfo, outDepth, outHeight, outWidth } = get3DPadAndOutInfo(pad3, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode); const outChannels = depthwise ? filterChannels * inChannels : filterChannels; let outShape; if (dataFormat === "channelsFirst") { @@ -10327,7 +10327,7 @@ function getPadAndOutInfo(pad3, inHeight, inWidth, strideHeight, strideWidth, fi let outWidth; if (typeof pad3 === "number") { const padType = pad3 === 0 ? "VALID" : "NUMBER"; - padInfo = {top: pad3, bottom: pad3, left: pad3, right: pad3, type: padType}; + padInfo = { top: pad3, bottom: pad3, left: pad3, right: pad3, type: padType }; const outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad3, roundingMode); outHeight = outShape[0]; outWidth = outShape[1]; @@ -10340,9 +10340,9 @@ function getPadAndOutInfo(pad3, inHeight, inWidth, strideHeight, strideWidth, fi const bottom = padAlongHeight - top; const left = Math.floor(padAlongWidth / 2); const right = padAlongWidth - left; - padInfo = {top, bottom, left, right, type: "SAME"}; + padInfo = { top, bottom, left, right, type: "SAME" }; } else if (pad3 === "valid") { - padInfo = {top: 0, bottom: 0, left: 0, right: 0, type: "VALID"}; + padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: "VALID" }; outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight); outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth); } else if (typeof pad3 === "object") { @@ -10351,13 +10351,13 @@ function getPadAndOutInfo(pad3, inHeight, inWidth, strideHeight, strideWidth, fi const left = dataFormat === "channelsLast" ? pad3[2][0] : pad3[3][0]; const right = dataFormat === "channelsLast" ? 
pad3[2][1] : pad3[3][1]; const padType = top === 0 && bottom === 0 && left === 0 && right === 0 ? "VALID" : "EXPLICIT"; - padInfo = {top, bottom, left, right, type: padType}; + padInfo = { top, bottom, left, right, type: padType }; outHeight = round((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode); outWidth = round((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode); } else { throw Error(`Unknown padding parameter: ${pad3}`); } - return {padInfo, outHeight, outWidth}; + return { padInfo, outHeight, outWidth }; } function get3DPadAndOutInfo(pad3, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, roundingMode) { let padInfo; @@ -10392,7 +10392,7 @@ function get3DPadAndOutInfo(pad3, inDepth, inHeight, inWidth, strideDepth, strid const bottom = padAlongHeight - top; const left = Math.floor(padAlongWidth / 2); const right = padAlongWidth - left; - padInfo = {top, bottom, left, right, front, back, type: "SAME"}; + padInfo = { top, bottom, left, right, front, back, type: "SAME" }; } else if (pad3 === "valid") { padInfo = { top: 0, @@ -10409,7 +10409,7 @@ function get3DPadAndOutInfo(pad3, inDepth, inHeight, inWidth, strideDepth, strid } else { throw Error(`Unknown padding parameter: ${pad3}`); } - return {padInfo, outDepth, outHeight, outWidth}; + return { padInfo, outDepth, outHeight, outWidth }; } function round(value, roundingMode) { if (!roundingMode) { @@ -10444,11 +10444,11 @@ function convertConv2DDataFormat(dataFormat) { } function reshape_(x, shape) { const $x = convertToTensor(x, "x", "reshape", "string_or_numeric"); - const inputs = {x: $x}; - const attrs = {shape}; + const inputs = { x: $x }; + const attrs = { shape }; return ENGINE.runKernel(Reshape, inputs, attrs); } -var reshape = op({reshape_}); +var reshape = op({ reshape_ }); function avgPool_(x, filterSize, strides, pad3, dimRoundingMode) { const $x = convertToTensor(x, "x", "avgPool", "float32"); const dilations = 1; @@ -10463,8 +10463,8 @@ function avgPool_(x, filterSize, strides, pad3, dimRoundingMode) { if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in avgPool: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x4D}; - const attrs = {filterSize, strides, pad: pad3, dimRoundingMode}; + const inputs = { x: x4D }; + const attrs = { filterSize, strides, pad: pad3, dimRoundingMode }; let res = ENGINE.runKernel(AvgPool, inputs, attrs); res = cast(res, $x.dtype); if (reshapedTo4D) { @@ -10472,7 +10472,7 @@ function avgPool_(x, filterSize, strides, pad3, dimRoundingMode) { } return res; } -var avgPool = op({avgPool_}); +var avgPool = op({ avgPool_ }); function avgPool3d_(x, filterSize, strides, pad3, dimRoundingMode, dataFormat = "NDHWC") { const $x = convertToTensor(x, "x", "avgPool3d", "float32"); let x5D = $x; @@ -10486,8 +10486,8 @@ function avgPool3d_(x, filterSize, strides, pad3, dimRoundingMode, dataFormat = if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x5D}; - const attrs = {filterSize, strides, pad: pad3, dimRoundingMode, dataFormat}; + const inputs = { x: x5D }; + const attrs = { filterSize, strides, pad: pad3, dimRoundingMode, dataFormat }; let res = ENGINE.runKernel(AvgPool3D, inputs, attrs); res = cast(res, x5D.dtype); if (reshapedTo5D) { @@ -10495,7 +10495,7 @@ function avgPool3d_(x, 
filterSize, strides, pad3, dimRoundingMode, dataFormat = } return res; } -var avgPool3d = op({avgPool3d_}); +var avgPool3d = op({ avgPool3d_ }); function concat_(tensors, axis = 0) { assert(tensors.length >= 1, () => "Pass at least one tensor to concat"); const $tensors = convertToTensorArray(tensors, "tensors", "concat", "string_or_numeric"); @@ -10511,32 +10511,32 @@ function concat_(tensors, axis = 0) { return clone($tensors[0]); } const inputs = $tensors; - const attr = {axis}; + const attr = { axis }; return ENGINE.runKernel(Concat, inputs, attr); } -var concat = op({concat_}); +var concat = op({ concat_ }); function sigmoid_(x) { const $x = convertToTensor(x, "x", "sigmoid"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Sigmoid, inputs); } -var sigmoid = op({sigmoid_}); +var sigmoid = op({ sigmoid_ }); function slice_(x, begin, size) { const $x = convertToTensor(x, "x", "slice", "string_or_numeric"); if ($x.rank === 0) { throw new Error("Slicing scalar is not possible"); } - const inputs = {x: $x}; - const attrs = {begin, size}; + const inputs = { x: $x }; + const attrs = { begin, size }; return ENGINE.runKernel(Slice, inputs, attrs); } -var slice = op({slice_}); +var slice = op({ slice_ }); function tanh_(x) { const $x = convertToTensor(x, "x", "tanh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Tanh, inputs); } -var tanh2 = op({tanh_}); +var tanh2 = op({ tanh_ }); function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) { const $forgetBias = convertToTensor(forgetBias, "forgetBias", "basicLSTMCell"); const $lstmKernel = convertToTensor(lstmKernel, "lstmKernel", "basicLSTMCell"); @@ -10558,18 +10558,18 @@ function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) { const newH = mul(tanh2(newC), sigmoid(o)); return [newC, newH]; } -var basicLSTMCell = op({basicLSTMCell_}); +var basicLSTMCell = op({ basicLSTMCell_ }); function batchToSpaceND_(x, blockShape, crops) { const $x = convertToTensor(x, "x", "batchToSpaceND"); const prod5 = blockShape.reduce((a, b) => a * b); assert($x.rank >= 1 + blockShape.length, () => `input rank is ${$x.rank} but should be > than blockShape.length ${blockShape.length}`); assert(crops.length === blockShape.length, () => `crops.length is ${crops.length} but should be equal to blockShape.length ${blockShape.length}`); assert($x.shape[0] % prod5 === 0, () => `input tensor batch is ${$x.shape[0]} but is not divisible by the product of the elements of blockShape ${blockShape.join(" * ")} === ${prod5}`); - const inputs = {x: $x}; - const attrs = {blockShape, crops}; + const inputs = { x: $x }; + const attrs = { blockShape, crops }; return ENGINE.runKernel(BatchToSpaceND, inputs, attrs); } -var batchToSpaceND = op({batchToSpaceND_}); +var batchToSpaceND = op({ batchToSpaceND_ }); function xAs4D(x) { let x4D; if (x.rank === 0 || x.rank === 1) { @@ -10609,11 +10609,11 @@ function batchNorm_(x, mean4, variance, offset, scale22, varianceEpsilon) { mean: $mean, variance: $variance }; - const attrs = {varianceEpsilon}; + const attrs = { varianceEpsilon }; const res = ENGINE.runKernel(FusedBatchNorm, inputs, attrs); return reshape(res, $x.shape); } -var batchNorm = op({batchNorm_}); +var batchNorm = op({ batchNorm_ }); function batchNorm2d_(x, mean4, variance, offset, scale22, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean4, "mean", "batchNorm"); @@ -10637,7 +10637,7 @@ function batchNorm2d_(x, mean4, variance, offset, 
scale22, varianceEpsilon) { } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } -var batchNorm2d = op({batchNorm2d_}); +var batchNorm2d = op({ batchNorm2d_ }); function batchNorm3d_(x, mean4, variance, offset, scale22, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean4, "mean", "batchNorm"); @@ -10661,7 +10661,7 @@ function batchNorm3d_(x, mean4, variance, offset, scale22, varianceEpsilon) { } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } -var batchNorm3d = op({batchNorm3d_}); +var batchNorm3d = op({ batchNorm3d_ }); function batchNorm4d_(x, mean4, variance, offset, scale22, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean4, "mean", "batchNorm"); @@ -10685,18 +10685,18 @@ function batchNorm4d_(x, mean4, variance, offset, scale22, varianceEpsilon) { } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } -var batchNorm4d = op({batchNorm4d_}); +var batchNorm4d = op({ batchNorm4d_ }); function bincount_(x, weights, size) { const $x = convertToTensor(x, "x", "bincount"); const $weights = convertToTensor(weights, "weights", "bincount"); assert($x.dtype === "int32", () => `Error in bincount: input dtype must be int32, but got ${$x.dtype}`); assert(size >= 0, () => `size must be non-negative, but got ${size}.`); assert($weights.size === $x.size || $weights.size === 0, () => `Error in bincount: weights must have the same size as input or0-length, but got input shape: ${$x.shape}, weights shape: ${$weights.shape}.`); - const inputs = {x: $x, weights: $weights}; - const attrs = {size}; + const inputs = { x: $x, weights: $weights }; + const attrs = { size }; return ENGINE.runKernel(Bincount, inputs, attrs); } -var bincount = op({bincount_}); +var bincount = op({ bincount_ }); function broadcastTo_(x, shape) { let input2 = convertToTensor(x, "broadcastTo", "x"); const xShape = input2.shape; @@ -10726,41 +10726,41 @@ function broadcastTo_(x, shape) { if (axes.length === 0) { return clone(input2); } - const inputs = {x: input2}; - const attrs = {reps}; + const inputs = { x: input2 }; + const attrs = { reps }; return ENGINE.runKernel(Tile, inputs, attrs); } -var broadcastTo = op({broadcastTo_}); +var broadcastTo = op({ broadcastTo_ }); function ceil_(x) { const $x = convertToTensor(x, "x", "ceil"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Ceil, inputs); } -var ceil = op({ceil_}); +var ceil = op({ ceil_ }); function clipByValue_(x, clipValueMin, clipValueMax) { const $x = convertToTensor(x, "x", "clipByValue"); assert(clipValueMin <= clipValueMax, () => `Error in clip: min (${clipValueMin}) must be less than or equal to max (${clipValueMax}).`); - const inputs = {x: $x}; - const attrs = {clipValueMin, clipValueMax}; + const inputs = { x: $x }; + const attrs = { clipValueMin, clipValueMax }; return ENGINE.runKernel(ClipByValue, inputs, attrs); } -var clipByValue = op({clipByValue_}); +var clipByValue = op({ clipByValue_ }); function concat1d_(tensors) { return concat(tensors, 0); } -var concat1d = op({concat1d_}); +var concat1d = op({ concat1d_ }); function concat2d_(tensors, axis) { return concat(tensors, axis); } -var concat2d = op({concat2d_}); +var concat2d = op({ concat2d_ }); function concat3d_(tensors, axis) { return concat(tensors, axis); } -var concat3d = op({concat3d_}); +var concat3d = op({ concat3d_ }); function concat4d_(tensors, axis) { return concat(tensors, axis); } -var 
concat4d = op({concat4d_}); +var concat4d = op({ concat4d_ }); function conv2d_(x, filter, strides, pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode) { const $x = convertToTensor(x, "x", "conv2d"); const $filter = convertToTensor(filter, "filter", "conv2d"); @@ -10778,15 +10778,15 @@ function conv2d_(x, filter, strides, pad3, dataFormat = "NHWC", dilations = [1, const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1]; assert(inDepth === $filter.shape[2], () => `Error in conv2d: depth of input (${inDepth}) must match input depth for filter ${$filter.shape[2]}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); - const inputs = {x: x4D, filter: $filter}; - const attrs = {strides, pad: pad3, dataFormat, dilations, dimRoundingMode}; + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad: pad3, dataFormat, dilations, dimRoundingMode }; const res = ENGINE.runKernel(Conv2D, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var conv2d = op({conv2d_}); +var conv2d = op({ conv2d_ }); function conv1d_(x, filter, stride, pad3, dataFormat = "NWC", dilation = 1, dimRoundingMode) { const $x = convertToTensor(x, "x", "conv1d"); const $filter = convertToTensor(filter, "filter", "conv1d"); @@ -10815,7 +10815,7 @@ function conv1d_(x, filter, stride, pad3, dataFormat = "NWC", dilation = 1, dimR } return reshape(res, [res.shape[0], res.shape[2], res.shape[3]]); } -var conv1d = op({conv1d_}); +var conv1d = op({ conv1d_ }); function conv2DBackpropInput_(xShape, dy, filter, strides, pad3, dataFormat = "NHWC", dimRoundingMode) { assert(xShape.length === dy.rank, () => `Length of inShape (${xShape.length}) and rank of dy (${dy.rank}) must match`); let xShape4D = xShape; @@ -10836,21 +10836,21 @@ function conv2DBackpropInput_(xShape, dy, filter, strides, pad3, dataFormat = "N if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {dy: dy4D, filter}; - const attrs = {strides, pad: pad3, dataFormat, dimRoundingMode, inputShape: xShape4D}; + const inputs = { dy: dy4D, filter }; + const attrs = { strides, pad: pad3, dataFormat, dimRoundingMode, inputShape: xShape4D }; const res = ENGINE.runKernel(Conv2DBackpropInput, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var conv2DBackpropInput = op({conv2DBackpropInput_}); +var conv2DBackpropInput = op({ conv2DBackpropInput_ }); function conv2dTranspose_(x, filter, outputShape, strides, pad3, dimRoundingMode) { const $x = convertToTensor(x, "x", "conv2dTranspose"); const $filter = convertToTensor(filter, "filter", "conv2dTranspose"); return conv2DBackpropInput(outputShape, $x, $filter, strides, pad3, "NHWC", dimRoundingMode); } -var conv2dTranspose = op({conv2dTranspose_}); +var conv2dTranspose = op({ conv2dTranspose_ }); function conv3d_(x, filter, strides, pad3, dataFormat = "NDHWC", dilations = [1, 1, 1]) { const $x = convertToTensor(x, "x", "conv3d"); const $filter = convertToTensor(filter, "filter", "conv3d"); @@ -10865,15 +10865,15 @@ function conv3d_(x, filter, strides, pad3, dataFormat = "NDHWC", dilations = [1, assert(x5D.shape[4] === $filter.shape[3], () => `Error in conv3d: depth of input (${x5D.shape[4]}) must match 
input depth for filter ${$filter.shape[3]}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in conv3D: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); assert(dataFormat === "NDHWC", () => `Error in conv3d: got dataFormat of ${dataFormat} but only NDHWC is currently supported.`); - const inputs = {x: x5D, filter: $filter}; - const attrs = {strides, pad: pad3, dataFormat, dilations}; + const inputs = { x: x5D, filter: $filter }; + const attrs = { strides, pad: pad3, dataFormat, dilations }; const res = ENGINE.runKernel(Conv3D, inputs, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } -var conv3d = op({conv3d_}); +var conv3d = op({ conv3d_ }); function conv3DBackpropInput_(xShape, dy, filter, strides, pad3) { assert(xShape.length === dy.rank, () => `Length of inShape (${xShape.length}) and rank of dy (${dy.rank}) must match`); let xShape5D = xShape; @@ -10891,40 +10891,40 @@ function conv3DBackpropInput_(xShape, dy, filter, strides, pad3) { assert(filter.rank === 5, () => `Error in conv3dDerInput: filter must be rank 5, but got rank ${filter.rank}`); assert(inDepth === filter.shape[3], () => `Error in conv3dDerInput: depth of input (${inDepth}) must match input depth for filter ${filter.shape[3]}.`); assert(outDepth === filter.shape[4], () => `Error in conv3dDerInput: depth of output (${outDepth}) must match output depth for filter ${filter.shape[4]}.`); - const inputs = {dy: dy5D, filter}; - const attrs = {pad: pad3, strides, inputShape: xShape5D}; + const inputs = { dy: dy5D, filter }; + const attrs = { pad: pad3, strides, inputShape: xShape5D }; const res = ENGINE.runKernel(Conv3DBackpropInputV2, inputs, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } -var conv3DBackpropInput = op({conv3DBackpropInput_}); +var conv3DBackpropInput = op({ conv3DBackpropInput_ }); function conv3dTranspose_(x, filter, outputShape, strides, pad3) { const $x = convertToTensor(x, "x", "conv3dTranspose"); const $filter = convertToTensor(filter, "filter", "conv3dTranspose"); return conv3DBackpropInput(outputShape, $x, $filter, strides, pad3); } -var conv3dTranspose = op({conv3dTranspose_}); +var conv3dTranspose = op({ conv3dTranspose_ }); function cos_(x) { const $x = convertToTensor(x, "x", "cos"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Cos, inputs); } -var cos = op({cos_}); +var cos = op({ cos_ }); function cosh_(x) { const $x = convertToTensor(x, "x", "cosh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Cosh, inputs); } -var cosh = op({cosh_}); +var cosh = op({ cosh_ }); function cumsum_(x, axis = 0, exclusive = false, reverse5 = false) { const $x = convertToTensor(x, "x", "cumsum"); - const inputs = {x: $x}; - const attrs = {axis, exclusive, reverse: reverse5}; + const inputs = { x: $x }; + const attrs = { axis, exclusive, reverse: reverse5 }; return ENGINE.runKernel(Cumsum, inputs, attrs); } -var cumsum = op({cumsum_}); +var cumsum = op({ cumsum_ }); function denseBincount_(x, weights, size, binaryOutput = false) { const $x = convertToTensor(x, "x", "denseBincount"); const $weights = convertToTensor(weights, "weights", "denseBincount"); @@ -10932,11 +10932,11 @@ function denseBincount_(x, weights, size, binaryOutput = false) { assert($x.rank <= 2, () => `Error in denseBincount: input must be at most rank 2, but got rank 
${$x.rank}.`); assert(size >= 0, () => `size must be non-negative, but got ${size}.`); assert($weights.size === $x.size || $weights.size === 0, () => `Error in denseBincount: weights must have the same shape as x or 0-length, but got x shape: ${$x.shape}, weights shape: ${$weights.shape}.`); - const inputs = {x: $x, weights: $weights}; - const attrs = {size, binaryOutput}; + const inputs = { x: $x, weights: $weights }; + const attrs = { size, binaryOutput }; return ENGINE.runKernel(DenseBincount, inputs, attrs); } -var denseBincount = op({denseBincount_}); +var denseBincount = op({ denseBincount_ }); function depthToSpace_(x, blockSize, dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "depthToSpace"); const inputHeight = dataFormat === "NHWC" ? $x.shape[1] : $x.shape[2]; @@ -10949,11 +10949,11 @@ function depthToSpace_(x, blockSize, dataFormat = "NHWC") { ${inputWidth} and ${blockSize} for depthToSpace with input shape ${$x.shape}`); assert(inputDepth % (blockSize * blockSize) === 0, () => `Dimension size must be evenly divisible by ${blockSize * blockSize} but is ${inputDepth} for depthToSpace with input shape ${$x.shape}`); - const inputs = {x: $x}; - const attrs = {blockSize, dataFormat}; + const inputs = { x: $x }; + const attrs = { blockSize, dataFormat }; return ENGINE.runKernel(DepthToSpace, inputs, attrs); } -var depthToSpace = op({depthToSpace_}); +var depthToSpace = op({ depthToSpace_ }); function depthwiseConv2d_(x, filter, strides, pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode) { const $x = convertToTensor(x, "x", "depthwiseConv2d"); const $filter = convertToTensor(filter, "filter", "depthwiseConv2d"); @@ -10969,21 +10969,21 @@ function depthwiseConv2d_(x, filter, strides, pad3, dataFormat = "NHWC", dilatio if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x4D, filter: $filter}; - const attrs = {strides, pad: pad3, dataFormat, dilations, dimRoundingMode}; + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad: pad3, dataFormat, dilations, dimRoundingMode }; const res = ENGINE.runKernel(DepthwiseConv2dNative, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var depthwiseConv2d = op({depthwiseConv2d_}); +var depthwiseConv2d = op({ depthwiseConv2d_ }); function diag_(x) { const $x = convertToTensor(x, "x", "diag"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Diag, inputs); } -var diag = op({diag_}); +var diag = op({ diag_ }); function dilation2d_(x, filter, strides, pad3, dilations = [1, 1], dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "dilation2d"); const $filter = convertToTensor(filter, "filter", "dilation2d"); @@ -10996,15 +10996,15 @@ function dilation2d_(x, filter, strides, pad3, dilations = [1, 1], dataFormat = x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); reshapedTo4D = true; } - const inputs = {x: x4D, filter: $filter}; - const attrs = {strides, pad: pad3, dilations}; + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad: pad3, dilations }; const res = ENGINE.runKernel(Dilation2D, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var dilation2d = op({dilation2d_}); +var dilation2d = op({ dilation2d_ }); function getBroadcastDims(inShape, outShape) { const 
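// Editor's note - annotation, not part of the diff. depthwiseConv2d above
// filters each input channel independently: its kernel has shape
// [h, w, inChannels, channelMultiplier] rather than mixing channels the way
// conv2d does. A sketch, assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const x = tf.ones([1, 4, 4, 3]);     // NHWC input with 3 channels
const k = tf.ones([2, 2, 3, 1]);     // per-channel 2x2 kernel, multiplier 1
console.log(tf.depthwiseConv2d(x, k, 1, 'valid').shape); // [1, 3, 3, 3]
// end note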
inRank = inShape.length; const dims = []; @@ -11060,10 +11060,10 @@ function equal_(a, b) { let $b = convertToTensor(b, "b", "equal"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Equal, inputs); } -var equal = op({equal_}); +var equal = op({ equal_ }); function where_(condition, a, b) { const $a = convertToTensor(a, "a", "where"); const $b = convertToTensor(b, "b", "where"); @@ -11079,13 +11079,13 @@ function where_(condition, a, b) { }; return ENGINE.runKernel(Select, inputs); } -var where = op({where_}); +var where = op({ where_ }); function zerosLike_(x) { const $x = convertToTensor(x, "x", "zerosLike"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(ZerosLike, inputs); } -var zerosLike = op({zerosLike_}); +var zerosLike = op({ zerosLike_ }); function divNoNan_(a, b) { let $a = convertToTensor(a, "a", "div"); let $b = convertToTensor(b, "b", "div"); @@ -11095,7 +11095,7 @@ function divNoNan_(a, b) { const bEqualsZero = equal($b, zeros4); return where(bEqualsZero, zeros4, divResult); } -var divNoNan = op({divNoNan_}); +var divNoNan = op({ divNoNan_ }); function dot_(t1, t2) { const $t1 = convertToTensor(t1, "t1", "dot"); const $t2 = convertToTensor(t2, "t2", "dot"); @@ -11123,57 +11123,57 @@ function dot_(t1, t2) { return t1t2; } } -var dot = op({dot_}); +var dot = op({ dot_ }); function einsum_(equation, ...tensors) { const $tensors = tensors.map((t, i) => convertToTensor(t, `tensors${i}`, "einsum")); - const attrs = {equation}; + const attrs = { equation }; return ENGINE.runKernel(Einsum, $tensors, attrs); } -var einsum = op({einsum_}); +var einsum = op({ einsum_ }); function elu_(x) { const $x = convertToTensor(x, "x", "elu"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Elu, inputs); } -var elu = op({elu_}); +var elu = op({ elu_ }); function erf_(x) { let $x = convertToTensor(x, "x", "erf"); assert($x.dtype === "int32" || $x.dtype === "float32", () => "Input dtype must be `int32` or `float32`."); if ($x.dtype === "int32") { $x = cast($x, "float32"); } - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Erf, inputs); } -var erf = op({erf_}); +var erf = op({ erf_ }); function exp_(x) { const $x = convertToTensor(x, "x", "exp"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Exp, inputs); } -var exp = op({exp_}); +var exp = op({ exp_ }); function expandDims_(x, axis = 0) { const $x = convertToTensor(x, "x", "expandDims", "string_or_numeric"); assert(axis <= $x.rank, () => "Axis must be <= rank of the tensor"); - const inputs = {input: $x}; - const attrs = {dim: axis}; + const inputs = { input: $x }; + const attrs = { dim: axis }; return ENGINE.runKernel(ExpandDims, inputs, attrs); } -var expandDims = op({expandDims_}); +var expandDims = op({ expandDims_ }); function expm1_(x) { const $x = convertToTensor(x, "x", "expm1"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Expm1, inputs); } -var expm1 = op({expm1_}); +var expm1 = op({ expm1_ }); function tile_(x, reps) { const $x = convertToTensor(x, "x", "tile", "string_or_numeric"); assert($x.rank === reps.length, () => `Error in transpose: rank of input ${$x.rank} must match length of reps ${reps}.`); - const inputs = {x: $x}; - const attrs = {reps}; + const inputs = { x: $x }; + const attrs = { reps }; return ENGINE.runKernel(Tile, inputs, attrs); } -var tile 
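// Editor's note - annotation, not part of the diff. divNoNan above is
// composed from equal/where/zerosLike: wherever the divisor is zero, the
// quotient is replaced by zero instead of NaN. A sketch, assuming the
// standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
tf.divNoNan(tf.tensor1d([4, 9, 0]), tf.tensor1d([2, 3, 0])).print();
// [2, 3, 0] - a plain div would return NaN for the 0/0 entry
// end note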
= op({tile_}); +var tile = op({ tile_ }); function eye_(numRows, numColumns, batchShape, dtype = "float32") { if (numColumns == null) { numColumns = numRows; @@ -11204,97 +11204,97 @@ function eye_(numRows, numColumns, batchShape, dtype = "float32") { } } } -var eye = op({eye_}); +var eye = op({ eye_ }); function fill(shape, value, dtype) { - const attrs = {shape, value, dtype}; + const attrs = { shape, value, dtype }; return ENGINE.runKernel(Fill, {}, attrs); } function floor_(x) { const $x = convertToTensor(x, "x", "floor"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Floor, inputs); } -var floor = op({floor_}); +var floor = op({ floor_ }); function gather_(x, indices, axis = 0, batchDims = 0) { const $x = convertToTensor(x, "x", "gather"); const $indices = convertToTensor(indices, "indices", "gather", "int32"); - const inputs = {x: $x, indices: $indices}; - const attrs = {axis, batchDims}; + const inputs = { x: $x, indices: $indices }; + const attrs = { axis, batchDims }; return ENGINE.runKernel(GatherV2, inputs, attrs); } -var gather = op({gather_}); +var gather = op({ gather_ }); function greater_(a, b) { let $a = convertToTensor(a, "a", "greater"); let $b = convertToTensor(b, "b", "greater"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Greater, inputs); } -var greater = op({greater_}); +var greater = op({ greater_ }); function greaterEqual_(a, b) { let $a = convertToTensor(a, "a", "greaterEqual"); let $b = convertToTensor(b, "b", "greaterEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(GreaterEqual, inputs); } -var greaterEqual = op({greaterEqual_}); +var greaterEqual = op({ greaterEqual_ }); function imag_(input2) { const $input = convertToTensor(input2, "input", "imag"); - const inputs = {input: $input}; + const inputs = { input: $input }; return ENGINE.runKernel(Imag, inputs); } -var imag = op({imag_}); +var imag = op({ imag_ }); function isFinite_(x) { const $x = convertToTensor(x, "x", "isFinite"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(IsFinite, inputs); } -var isFinite2 = op({isFinite_}); +var isFinite2 = op({ isFinite_ }); function isInf_(x) { const $x = convertToTensor(x, "x", "isInf"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(IsInf, inputs); } -var isInf = op({isInf_}); +var isInf = op({ isInf_ }); function isNaN_(x) { const $x = convertToTensor(x, "x", "isNaN"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(IsNan, inputs); } -var isNaN2 = op({isNaN_}); +var isNaN2 = op({ isNaN_ }); function leakyRelu_(x, alpha = 0.2) { const $x = convertToTensor(x, "x", "leakyRelu"); - const inputs = {x: $x}; - const attrs = {alpha}; + const inputs = { x: $x }; + const attrs = { alpha }; return ENGINE.runKernel(LeakyRelu, inputs, attrs); } -var leakyRelu = op({leakyRelu_}); +var leakyRelu = op({ leakyRelu_ }); function less_(a, b) { let $a = convertToTensor(a, "a", "less"); let $b = convertToTensor(b, "b", "less"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Less, inputs); } -var less = op({less_}); +var less = op({ less_ }); function lessEqual_(a, b) { let $a 
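// Editor's note - annotation, not part of the diff. gather above selects
// slices of x along an axis by integer indices (the GatherV2 kernel).
// A sketch, assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const v = tf.tensor1d([10, 20, 30, 40]);
tf.gather(v, tf.tensor1d([3, 0], 'int32')).print(); // [40, 10]
// end note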
= convertToTensor(a, "a", "lessEqual"); let $b = convertToTensor(b, "b", "lessEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(LessEqual, inputs); } -var lessEqual = op({lessEqual_}); +var lessEqual = op({ lessEqual_ }); function linspace(start, stop, num) { if (num <= 0) { throw new Error("The number of values should be positive."); } - const attrs = {start, stop, num}; + const attrs = { start, stop, num }; return ENGINE.runKernel(LinSpace, {}, attrs); } function localResponseNormalization_(x, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5) { @@ -11308,8 +11308,8 @@ function localResponseNormalization_(x, depthRadius = 5, bias = 1, alpha = 1, be reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } - const inputs = {x: x4D}; - const attrs = {depthRadius, bias, alpha, beta}; + const inputs = { x: x4D }; + const attrs = { depthRadius, bias, alpha, beta }; const res = ENGINE.runKernel(LRN, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); @@ -11317,26 +11317,26 @@ function localResponseNormalization_(x, depthRadius = 5, bias = 1, alpha = 1, be return res; } } -var localResponseNormalization = op({localResponseNormalization_}); +var localResponseNormalization = op({ localResponseNormalization_ }); function log_(x) { const $x = convertToTensor(x, "x", "log"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Log, inputs); } -var log = op({log_}); +var log = op({ log_ }); function log1p_(x) { const $x = convertToTensor(x, "x", "log1p"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Log1p, inputs); } -var log1p = op({log1p_}); +var log1p = op({ log1p_ }); function grad(f) { assert(isFunction(f), () => "The f passed in grad(f) must be a function"); return (x, dy) => { const $x = convertToTensor(x, "x", "tf.grad", "string_or_numeric"); const $dy = dy != null ? convertToTensor(dy, "dy", "tf.grad") : null; return ENGINE.tidy(() => { - const {value, grads: grads2} = ENGINE.gradients(() => f($x), [$x], $dy); + const { value, grads: grads2 } = ENGINE.gradients(() => f($x), [$x], $dy); if ($dy != null) { assertShapesMatch(value.shape, $dy.shape, "The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"); } @@ -11352,7 +11352,7 @@ function grads(f) { const $args = convertToTensorArray(args, "args", "tf.grads", "string_or_numeric"); const $dy = dy != null ? 
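// Editor's note - annotation, not part of the diff. grad above wraps
// ENGINE.gradients: given f, it returns a function computing df/dx,
// optionally seeded with an upstream dy of the same shape as f(x).
// A sketch, assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const g = tf.grad(x => x.square());  // d/dx x^2 = 2x
g(tf.tensor1d([2, 3])).print();      // [4, 6]
// end note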
convertToTensor(dy, "dy", "tf.grads") : null; return ENGINE.tidy(() => { - const {value, grads: grads2} = ENGINE.gradients(() => f(...$args), $args, $dy); + const { value, grads: grads2 } = ENGINE.gradients(() => f(...$args), $args, $dy); if ($dy != null) { assertShapesMatch(value.shape, $dy.shape, "The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"); } @@ -11366,9 +11366,9 @@ function valueAndGrad(f) { return (x, dy) => { assert(x instanceof Tensor, () => "The x passed in valueAndGrad(f)(x) must be a tensor"); assert(dy == null || dy instanceof Tensor, () => "The dy passed in valueAndGrad(f)(x, dy) must be a tensor"); - const {grads: grads2, value} = ENGINE.gradients(() => f(x), [x], dy); + const { grads: grads2, value } = ENGINE.gradients(() => f(x), [x], dy); checkGrads(grads2); - return {grad: grads2[0], value}; + return { grad: grads2[0], value }; }; } function valueAndGrads(f) { @@ -11399,7 +11399,7 @@ function variableGrads(f, varList) { varList = varList.filter((variable2) => variable2.trainable); assert(varList.length > 0, () => `variableGrads() expects at least one of the input variables to be trainable, but none of the ${originalVarCount} variables is trainable.`); const allowNoGradients = true; - const {value, grads: grads2} = ENGINE.gradients(f, varList, null, allowNoGradients); + const { value, grads: grads2 } = ENGINE.gradients(f, varList, null, allowNoGradients); assert(grads2.some((g) => g != null), () => "Cannot find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize()."); assert(value.rank === 0, () => `The f passed in variableGrads(f) must return a scalar, but it returned a rank-${value.rank} tensor`); const namedGrads = {}; @@ -11411,7 +11411,7 @@ function variableGrads(f, varList) { if (specifiedNonTrainable != null) { specifiedNonTrainable.forEach((v) => namedGrads[v.name] = null); } - return {value, grads: namedGrads}; + return { value, grads: namedGrads }; } function customGrad(f) { return ENGINE.customGrad(f); @@ -11425,16 +11425,16 @@ function checkGrads(grads2) { } function neg_(x) { const $x = convertToTensor(x, "x", "neg"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Neg, inputs); } -var neg = op({neg_}); +var neg = op({ neg_ }); function softplus_(x) { const $x = convertToTensor(x, "x", "softplus"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Softplus, inputs); } -var softplus = op({softplus_}); +var softplus = op({ softplus_ }); function logSigmoid_(x) { const $x = convertToTensor(x, "x", "logSigmoid"); const customOp = customGrad((x2) => { @@ -11443,36 +11443,36 @@ function logSigmoid_(x) { const derX = mul(dy, sigmoid(neg(x2))); return derX; }; - return {value, gradFunc}; + return { value, gradFunc }; }); return customOp($x); } -var logSigmoid = op({logSigmoid_}); +var logSigmoid = op({ logSigmoid_ }); function max_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "max"); - const inputs = {x: $x}; - const attrs = {reductionIndices: axis, keepDims}; + const inputs = { x: $x }; + const attrs = { reductionIndices: axis, keepDims }; return ENGINE.runKernel(Max, inputs, attrs); } -var max = op({max_}); +var max = op({ max_ }); function sub_(a, b) { let $a = convertToTensor(a, "a", "sub"); let $b = convertToTensor(b, "b", "sub"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const 
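// Editor's note - annotation, not part of the diff. valueAndGrad above runs
// f once and returns both f(x) and df/dx, avoiding a second forward pass;
// variableGrads does the same against trainable variables. A sketch,
// assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const {value, grad} = tf.valueAndGrad(x => x.square().sum())(tf.tensor1d([2, 3]));
value.print(); // 13  (4 + 9)
grad.print();  // [4, 6]
// end note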
inputs = { a: $a, b: $b }; return ENGINE.runKernel(Sub, inputs); } -var sub = op({sub_}); +var sub = op({ sub_ }); function sum_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "sum"); if ($x.dtype === "bool") { $x = cast($x, "int32"); } - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(Sum, inputs, attrs); } -var sum2 = op({sum_}); +var sum2 = op({ sum_ }); function logSoftmax_(logits, axis = -1) { const $logits = convertToTensor(logits, "logits", "logSoftmax"); if (axis === -1) { @@ -11493,11 +11493,11 @@ function logSoftmax_(logits, axis = -1) { const softmax6 = exp(value2); return sub(dy, mul(sum2(dy, axis, keepDims2), softmax6)); }; - return {value, gradFunc}; + return { value, gradFunc }; }); return customOp($logits); } -var logSoftmax = op({logSoftmax_}); +var logSoftmax = op({ logSoftmax_ }); function axesAreInnerMostDims(axes, rank) { for (let i = 0; i < axes.length; ++i) { if (axes[axes.length - i - 1] !== rank - 1 - i) { @@ -11576,36 +11576,36 @@ function logSumExp_(x, axis = null, keepDims = false) { } return res; } -var logSumExp = op({logSumExp_}); +var logSumExp = op({ logSumExp_ }); function logicalAnd_(a, b) { const $a = convertToTensor(a, "a", "logicalAnd", "bool"); const $b = convertToTensor(b, "b", "logicalAnd", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(LogicalAnd, inputs); } -var logicalAnd = op({logicalAnd_}); +var logicalAnd = op({ logicalAnd_ }); function logicalNot_(x) { const $x = convertToTensor(x, "x", "logicalNot", "bool"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(LogicalNot, inputs); } -var logicalNot = op({logicalNot_}); +var logicalNot = op({ logicalNot_ }); function logicalOr_(a, b) { const $a = convertToTensor(a, "a", "logicalOr", "bool"); const $b = convertToTensor(b, "b", "logicalOr", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(LogicalOr, inputs); } -var logicalOr = op({logicalOr_}); +var logicalOr = op({ logicalOr_ }); function logicalXor_(a, b) { const $a = convertToTensor(a, "a", "logicalXor", "bool"); const $b = convertToTensor(b, "b", "logicalXor", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); return logicalAnd(logicalOr(a, b), logicalNot(logicalAnd(a, b))); } -var logicalXor = op({logicalXor_}); +var logicalXor = op({ logicalXor_ }); function maxPool_(x, filterSize, strides, pad3, dimRoundingMode) { const $x = convertToTensor(x, "x", "maxPool"); const dilations = 1; @@ -11620,15 +11620,15 @@ function maxPool_(x, filterSize, strides, pad3, dimRoundingMode) { if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in maxPool: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x4D}; - const attrs = {filterSize, strides, pad: pad3, dimRoundingMode}; + const inputs = { x: x4D }; + const attrs = { filterSize, strides, pad: pad3, dimRoundingMode }; const res = ENGINE.runKernel(MaxPool, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var maxPool = op({maxPool_}); +var maxPool = op({ maxPool_ }); function maxPool3d_(x, filterSize = [1, 1, 1], strides, pad3, dimRoundingMode, dataFormat = "NDHWC") { const $x = convertToTensor(x, "x", 
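// Editor's note - annotation, not part of the diff. maxPool above accepts
// rank-3 or rank-4 input; rank-3 is auto-reshaped to a batch of one, as the
// reshapedTo4D branch shows. A sketch, assuming the standard
// @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]); // [[1, 2], [3, 4]]
tf.maxPool(x, 2, 1, 'valid').print(); // [[[[4]]]] - max of the 2x2 window
// end note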
"maxPool3d"); let x5D = $x; @@ -11642,23 +11642,23 @@ function maxPool3d_(x, filterSize = [1, 1, 1], strides, pad3, dimRoundingMode, d if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x5D}; - const attrs = {filterSize, strides, pad: pad3, dimRoundingMode, dataFormat}; + const inputs = { x: x5D }; + const attrs = { filterSize, strides, pad: pad3, dimRoundingMode, dataFormat }; const res = ENGINE.runKernel(MaxPool3D, inputs, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } -var maxPool3d = op({maxPool3d_}); +var maxPool3d = op({ maxPool3d_ }); function maxPoolWithArgmax_(x, filterSize, strides, pad3, includeBatchInIndex = false) { const $x = convertToTensor(x, "x", "maxPoolWithArgmax"); - const inputs = {x: $x}; - const attrs = {filterSize, strides, pad: pad3, includeBatchInIndex}; + const inputs = { x: $x }; + const attrs = { filterSize, strides, pad: pad3, includeBatchInIndex }; const result = ENGINE.runKernel(MaxPoolWithArgmax, inputs, attrs); - return {result: result[0], indexes: result[1]}; + return { result: result[0], indexes: result[1] }; } -var maxPoolWithArgmax = op({maxPoolWithArgmax_}); +var maxPoolWithArgmax = op({ maxPoolWithArgmax_ }); function maximum_(a, b) { let $a = convertToTensor(a, "a", "maximum"); let $b = convertToTensor(b, "b", "maximum"); @@ -11668,17 +11668,17 @@ function maximum_(a, b) { $b = cast($b, "int32"); } assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Maximum, inputs); } -var maximum = op({maximum_}); +var maximum = op({ maximum_ }); function mean_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "mean"); - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(Mean, inputs, attrs); } -var mean = op({mean_}); +var mean = op({ mean_ }); function zeros(shape, dtype = "float32") { if (dtype === "complex64") { const real4 = zeros(shape, "float32"); @@ -11697,7 +11697,7 @@ function ones2(shape, dtype = "float32") { const values = makeOnesTypedArray(sizeFromShape(shape), dtype); return ENGINE.makeTensor(values, shape, dtype); } -function meshgrid(x, y, {indexing = "xy"} = {}) { +function meshgrid(x, y, { indexing = "xy" } = {}) { if (indexing !== "xy" && indexing !== "ij") { throw new TypeError(`${indexing} is not a valid third argument to meshgrid`); } @@ -11728,11 +11728,11 @@ function meshgrid(x, y, {indexing = "xy"} = {}) { } function min_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "min"); - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(Min, inputs, attrs); } -var min = op({min_}); +var min = op({ min_ }); function minimum_(a, b) { let $a = convertToTensor(a, "a", "minimum"); let $b = convertToTensor(b, "b", "minimum"); @@ -11742,10 +11742,10 @@ function minimum_(a, b) { $b = cast($b, "int32"); } assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Minimum, inputs); } -var minimum = op({minimum_}); +var minimum = op({ minimum_ }); function mirrorPad_(x, paddings, mode) { assert(mode === "reflect" || mode === "symmetric", () => 
`Invalid mode. Mode must be either reflect or symmetric. Got ${mode}.`); const $x = convertToTensor(x, "x", "mirrorPad"); @@ -11758,25 +11758,25 @@ function mirrorPad_(x, paddings, mode) { assert(paddings[i].length === 2, () => `Invalid number of paddings. Must be length of 2 each.`); assert(paddings[i][0] >= 0 && paddings[i][0] <= $x.shape[i] - shapeOffset && paddings[i][1] >= 0 && paddings[i][1] <= $x.shape[i] - shapeOffset, () => `Padding in dimension ${i} cannot be greater than or equal to ${$x.shape[i] - shapeOffset} or less than 0 for input of shape ${$x.shape}`); } - const attrs = {paddings, mode}; - const inputs = {x: $x}; + const attrs = { paddings, mode }; + const inputs = { x: $x }; return ENGINE.runKernel(MirrorPad, inputs, attrs); } -var mirrorPad = op({mirrorPad_}); +var mirrorPad = op({ mirrorPad_ }); function mod_(a, b) { let $a = convertToTensor(a, "a", "mod"); let $b = convertToTensor(b, "b", "mod"); [$a, $b] = makeTypesMatch($a, $b); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(Mod, inputs); } -var mod = op({mod_}); +var mod = op({ mod_ }); function square_(x) { const $x = convertToTensor(x, "x", "square"); const attrs = {}; - return ENGINE.runKernel("Square", {x: $x}, attrs); + return ENGINE.runKernel("Square", { x: $x }, attrs); } -var square = op({square_}); +var square = op({ square_ }); function moments_(x, axis = null, keepDims = false) { x = convertToTensor(x, "x", "moments"); const axes = parseAxisParam(axis, x.shape); @@ -11787,9 +11787,9 @@ function moments_(x, axis = null, keepDims = false) { } const devSquared = square(sub(cast(x, "float32"), reshape(xMean, keepDimsShape))); const variance = mean(devSquared, axes, keepDims); - return {mean: xMean, variance}; + return { mean: xMean, variance }; } -var moments = op({moments_}); +var moments = op({ moments_ }); function multiRNNCell_(lstmCells, data, c, h) { const $data = convertToTensor(data, "data", "multiRNNCell"); const $c = convertToTensorArray(c, "c", "multiRNNCell"); @@ -11810,7 +11810,7 @@ function multiRNNCell_(lstmCells, data, c, h) { } return [newC, newH]; } -var multiRNNCell = op({multiRNNCell_}); +var multiRNNCell = op({ multiRNNCell_ }); function multinomial_(logits, numSamples, seed, normalized = false) { const $logits = convertToTensor(logits, "logits", "multinomial"); const numOutcomes = $logits.size; @@ -11823,27 +11823,27 @@ function multinomial_(logits, numSamples, seed, normalized = false) { } seed = seed || Math.random(); const logits2D = origRank === 1 ? reshape($logits, [1, -1]) : $logits; - const inputs = {logits: logits2D}; - const attrs = {numSamples, seed, normalized}; + const inputs = { logits: logits2D }; + const attrs = { numSamples, seed, normalized }; const res = ENGINE.runKernel(Multinomial, inputs, attrs); return origRank === 1 ? 
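// Editor's note - annotation, not part of the diff. moments above composes
// mean with square/sub to return the mean and the (population) variance over
// the given axes in one call. A sketch, assuming the standard
// @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const {mean, variance} = tf.moments(tf.tensor1d([1, 2, 3, 4]));
mean.print();     // 2.5
variance.print(); // 1.25
// end note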
reshape(res, [res.size]) : res; } -var multinomial = op({multinomial_}); +var multinomial = op({ multinomial_ }); function notEqual_(a, b) { let $a = convertToTensor(a, "a", "notEqual"); let $b = convertToTensor(b, "b", "notEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; return ENGINE.runKernel(NotEqual, inputs); } -var notEqual = op({notEqual_}); +var notEqual = op({ notEqual_ }); function onesLike_(x) { const $x = convertToTensor(x, "x", "onesLike"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(OnesLike, inputs); } -var onesLike = op({onesLike_}); +var onesLike = op({ onesLike_ }); function outerProduct_(v1, v2) { const $v1 = convertToTensor(v1, "v1", "outerProduct"); const $v2 = convertToTensor(v2, "v2", "outerProduct"); @@ -11852,37 +11852,37 @@ function outerProduct_(v1, v2) { const v22D = reshape($v2, [1, -1]); return matMul(v12D, v22D); } -var outerProduct = op({outerProduct_}); +var outerProduct = op({ outerProduct_ }); function pad_(x, paddings, constantValue = 0) { const $x = convertToTensor(x, "x", "pad"); if ($x.rank === 0) { throw new Error("pad(scalar) is not defined. Pass non-scalar to pad"); } - const attrs = {paddings, constantValue}; - const inputs = {x: $x}; + const attrs = { paddings, constantValue }; + const inputs = { x: $x }; return ENGINE.runKernel(PadV2, inputs, attrs); } -var pad = op({pad_}); +var pad = op({ pad_ }); function pad1d_(x, paddings, constantValue = 0) { assert(paddings.length === 2, () => "Invalid number of paddings. Must be length of 2."); return pad(x, [paddings], constantValue); } -var pad1d = op({pad1d_}); +var pad1d = op({ pad1d_ }); function pad2d_(x, paddings, constantValue = 0) { assert(paddings.length === 2 && paddings[0].length === 2 && paddings[1].length === 2, () => "Invalid number of paddings. Must be length of 2 each."); return pad(x, paddings, constantValue); } -var pad2d = op({pad2d_}); +var pad2d = op({ pad2d_ }); function pad3d_(x, paddings, constantValue = 0) { assert(paddings.length === 3 && paddings[0].length === 2 && paddings[1].length === 2 && paddings[2].length === 2, () => "Invalid number of paddings. Must be length of 2 each."); return pad(x, paddings, constantValue); } -var pad3d = op({pad3d_}); +var pad3d = op({ pad3d_ }); function pad4d_(x, paddings, constantValue = 0) { assert(paddings.length === 4 && paddings[0].length === 2 && paddings[1].length === 2 && paddings[2].length === 2 && paddings[3].length === 2, () => "Invalid number of paddings. 
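// Editor's note - annotation, not part of the diff. pad above takes one
// [before, after] pair per dimension; pad1d..pad4d are just rank-checked
// wrappers over the same PadV2 kernel. A sketch, assuming the standard
// @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const m = tf.tensor2d([[1, 2], [3, 4]]);
tf.pad(m, [[1, 1], [0, 0]]).print();
// [[0, 0], [1, 2], [3, 4], [0, 0]] - one zero row added above and below
// end note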
Must be length of 2 each."); return pad(x, paddings, constantValue); } -var pad4d = op({pad4d_}); +var pad4d = op({ pad4d_ }); function spaceToBatchND_(x, blockShape, paddings) { const $x = convertToTensor(x, "x", "spaceToBatchND"); assert($x.rank >= 1 + blockShape.length, () => `input rank ${$x.rank} should be > than [blockShape] ${blockShape.length}`); @@ -11893,11 +11893,11 @@ function spaceToBatchND_(x, blockShape, paddings) { } return a; }, true), () => `input spatial dimensions ${$x.shape.slice(1)} with paddings ${paddings.toString()} must be divisible by blockShapes ${blockShape.toString()}`); - const inputs = {x: $x}; - const attrs = {blockShape, paddings}; + const inputs = { x: $x }; + const attrs = { blockShape, paddings }; return ENGINE.runKernel(SpaceToBatchND, inputs, attrs); } -var spaceToBatchND = op({spaceToBatchND_}); +var spaceToBatchND = op({ spaceToBatchND_ }); function pool_(input2, windowShape, poolingType, pad3, dilations, strides) { if (dilations == null) { dilations = [1, 1]; @@ -11957,32 +11957,32 @@ function withSpaceToBatchBasePaddings(filterShape, dilation) { return [padExtraStart[i], padExtraEnd[i]]; }); } -var pool = op({pool_}); +var pool = op({ pool_ }); function pow_(base2, exp4) { let $base = convertToTensor(base2, "base", "pow"); let $exp = convertToTensor(exp4, "exp", "pow"); [$base, $exp] = makeTypesMatch($base, $exp); - const inputs = {a: $base, b: $exp}; + const inputs = { a: $base, b: $exp }; return ENGINE.runKernel(Pow, inputs); } -var pow = op({pow_}); +var pow = op({ pow_ }); function prelu_(x, alpha) { const $x = convertToTensor(x, "x", "prelu"); const $alpha = convertToTensor(alpha, "alpha", "prelu"); - const inputs = {x: $x, alpha: $alpha}; + const inputs = { x: $x, alpha: $alpha }; return ENGINE.runKernel(Prelu, inputs); } -var prelu = op({prelu_}); +var prelu = op({ prelu_ }); function prod_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "prod"); if ($x.dtype === "bool") { $x = cast($x, "int32"); } - const inputs = {x: $x}; - const attrs = {axis, keepDims}; + const inputs = { x: $x }; + const attrs = { axis, keepDims }; return ENGINE.runKernel(Prod, inputs, attrs); } -var prod = op({prod_}); +var prod = op({ prod_ }); function rand_(shape, randFunction, dtype) { const size = sizeFromShape(shape); let values = null; @@ -12000,7 +12000,7 @@ function rand_(shape, randFunction, dtype) { } return ENGINE.makeTensor(values, shape, dtype); } -var rand = op({rand_}); +var rand = op({ rand_ }); var seedrandom = __toModule(require_seedrandom2()); var MPRandGauss = class { constructor(mean4, stdDeviation, dtype, truncated, seed) { @@ -12141,7 +12141,7 @@ function randomGamma_(shape, alpha, beta = 1, dtype = "float32", seed) { } return res.toTensor(); } -var randomGamma = op({randomGamma_}); +var randomGamma = op({ randomGamma_ }); function randomNormal_(shape, mean4 = 0, stdDev = 1, dtype, seed) { if (dtype != null && dtype === "bool") { throw new Error(`Unsupported data type ${dtype}`); @@ -12153,7 +12153,7 @@ function randomNormal_(shape, mean4 = 0, stdDev = 1, dtype, seed) { } return res.toTensor(); } -var randomNormal = op({randomNormal_}); +var randomNormal = op({ randomNormal_ }); function randomUniform_(shape, minval = 0, maxval = 1, dtype = "float32", seed) { const res = buffer(shape, dtype); const random = new UniformRandom(minval, maxval, null, seed); @@ -12162,81 +12162,81 @@ function randomUniform_(shape, minval = 0, maxval = 1, dtype = "float32", seed) } return res.toTensor(); } -var randomUniform = 
op({randomUniform_}); +var randomUniform = op({ randomUniform_ }); function range(start, stop, step5 = 1, dtype = "float32") { if (step5 === 0) { throw new Error("Cannot have a step of zero"); } - const attrs = {start, stop, step: step5, dtype}; + const attrs = { start, stop, step: step5, dtype }; return ENGINE.runKernel(Range, {}, attrs); } function real_(input2) { const $input = convertToTensor(input2, "input", "real"); - const inputs = {input: $input}; + const inputs = { input: $input }; return ENGINE.runKernel(Real, inputs); } -var real = op({real_}); +var real = op({ real_ }); function reciprocal_(x) { const $x = convertToTensor(x, "x", "reciprocal"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Reciprocal, inputs); } -var reciprocal = op({reciprocal_}); +var reciprocal = op({ reciprocal_ }); function relu_(x) { const $x = convertToTensor(x, "x", "relu"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Relu, inputs); } -var relu = op({relu_}); +var relu = op({ relu_ }); function relu6_(x) { const $x = convertToTensor(x, "x", "relu6"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Relu6, inputs); } -var relu6 = op({relu6_}); +var relu6 = op({ relu6_ }); function reverse_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); - const inputs = {x: $x}; - const attrs = {dims: axis}; + const inputs = { x: $x }; + const attrs = { dims: axis }; return ENGINE.runKernel(Reverse, inputs, attrs); } -var reverse = op({reverse_}); +var reverse = op({ reverse_ }); function reverse1d_(x) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 1, () => `Error in reverse1D: x must be rank 1 but got rank ${$x.rank}.`); return reverse($x, 0); } -var reverse1d = op({reverse1d_}); +var reverse1d = op({ reverse1d_ }); function reverse2d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 2, () => `Error in reverse2D: x must be rank 2 but got rank ${$x.rank}.`); return reverse($x, axis); } -var reverse2d = op({reverse2d_}); +var reverse2d = op({ reverse2d_ }); function reverse3d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 3, () => `Error in reverse3D: x must be rank 3 but got rank ${$x.rank}.`); return reverse($x, axis); } -var reverse3d = op({reverse3d_}); +var reverse3d = op({ reverse3d_ }); function reverse4d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 4, () => `Error in reverse4D: x must be rank 4 but got rank ${$x.rank}.`); return reverse($x, axis); } -var reverse4d = op({reverse4d_}); +var reverse4d = op({ reverse4d_ }); function round_(x) { const $x = convertToTensor(x, "x", "round"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Round, inputs); } -var round2 = op({round_}); +var round2 = op({ round_ }); function rsqrt_(x) { const $x = convertToTensor(x, "x", "rsqrt"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Rsqrt, inputs); } -var rsqrt = op({rsqrt_}); +var rsqrt = op({ rsqrt_ }); function scalar(value, dtype) { if ((isTypedArray(value) && dtype !== "string" || Array.isArray(value)) && dtype !== "complex64") { throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)"); @@ -12250,10 +12250,10 @@ function scalar(value, dtype) { } function selu_(x) { const $x = convertToTensor(x, "x", "selu"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Selu, inputs); } -var selu 
= op({selu_}); +var selu = op({ selu_ }); function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad3, dilation = [1, 1], dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "separableConv2d"); const $depthwiseFilter = convertToTensor(depthwiseFilter, "depthwiseFilter", "separableConv2d"); @@ -12283,7 +12283,7 @@ function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad3, di } return res; } -var separableConv2d = op({separableConv2d_}); +var separableConv2d = op({ separableConv2d_ }); async function setdiff1dAsync_(x, y) { const $x = convertToTensor(x, "x", "setdiff1d"); const $y = convertToTensor(y, "y", "setdiff1d"); @@ -12313,46 +12313,46 @@ async function setdiff1dAsync_(x, y) { var setdiff1dAsync = setdiff1dAsync_; function sign_(x) { const $x = convertToTensor(x, "x", "sign"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Sign, inputs); } -var sign = op({sign_}); +var sign = op({ sign_ }); function sin_(x) { const $x = convertToTensor(x, "x", "sin"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Sin, inputs); } -var sin = op({sin_}); +var sin = op({ sin_ }); function sinh_(x) { const $x = convertToTensor(x, "x", "sinh"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Sinh, inputs); } -var sinh = op({sinh_}); +var sinh = op({ sinh_ }); function slice1d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice1d"); assert($x.rank === 1, () => `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`); return slice($x, [begin], [size]); } -var slice1d = op({slice1d_}); +var slice1d = op({ slice1d_ }); function slice2d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice2d"); assert($x.rank === 2, () => `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } -var slice2d = op({slice2d_}); +var slice2d = op({ slice2d_ }); function slice3d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice3d"); assert($x.rank === 3, () => `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } -var slice3d = op({slice3d_}); +var slice3d = op({ slice3d_ }); function slice4d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice4d"); assert($x.rank === 4, () => `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } -var slice4d = op({slice4d_}); +var slice4d = op({ slice4d_ }); function softmax_(logits, dim = -1) { const $logits = convertToTensor(logits, "logits", "softmax", "float32"); if (dim === -1) { @@ -12361,23 +12361,23 @@ function softmax_(logits, dim = -1) { if (dim !== $logits.rank - 1) { throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${$logits.rank} and dim was ${dim}`); } - const inputs = {logits: $logits}; - const attrs = {dim}; + const inputs = { logits: $logits }; + const attrs = { dim }; return ENGINE.runKernel(Softmax, inputs, attrs); } -var softmax = op({softmax_}); +var softmax = op({ softmax_ }); function fft_(input2) { assert(input2.dtype === "complex64", () => `The dtype for tf.spectral.fft() must be complex64 but got ${input2.dtype}.`); - const inputs = {input: input2}; + const inputs = { input: input2 }; return ENGINE.runKernel(FFT, inputs); } -var fft = op({fft_}); +var fft = op({ fft_ }); function ifft_(input2) { assert(input2.dtype === "complex64", () => `The dtype for tf.spectral.ifft() must be complex64 but got ${input2.dtype}.`); - const inputs = {input: input2}; + const inputs = { input: input2 }; return ENGINE.runKernel(IFFT, inputs); } -var ifft = op({ifft_}); +var ifft = op({ ifft_ }); function irfft_(input2) { const innerDimensionSize = input2.shape[input2.shape.length - 1]; const batch = input2.size / innerDimensionSize; @@ -12405,14 +12405,14 @@ function irfft_(input2) { } return ret; } -var irfft = op({irfft_}); +var irfft = op({ irfft_ }); function split_(x, numOrSizeSplits, axis = 0) { const $x = convertToTensor(x, "x", "split"); - const inputs = {x: $x}; - const attr = {numOrSizeSplits, axis}; + const inputs = { x: $x }; + const attr = { numOrSizeSplits, axis }; return ENGINE.runKernel(SplitV, inputs, attr); } -var split = op({split_}); +var split = op({ split_ }); function rfft_(input2, fftLength) { assert(input2.dtype === "float32", () => `The dtype for rfft() must be real value but got ${input2.dtype}`); let innerDimensionSize = input2.shape[input2.shape.length - 1]; @@ -12444,28 +12444,28 @@ function rfft_(input2, fftLength) { outputShape[adjustedInput.shape.length - 1] = half; return reshape(complex(realComplexConjugate[0], imagComplexConjugate[0]), outputShape); } -var rfft = op({rfft_}); +var rfft = op({ rfft_ }); function sqrt_(x) { const $x = convertToTensor(x, "x", "sqrt"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Sqrt, inputs); } -var sqrt = op({sqrt_}); +var sqrt = op({ sqrt_ }); function squaredDifference_(a, b) { let $a = convertToTensor(a, "a", "squaredDifference"); let $b = convertToTensor(b, "b", "squaredDifference"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); - const inputs = {a: $a, b: $b}; + const inputs = { a: $a, b: $b }; const attrs = {}; return ENGINE.runKernel(SquaredDifference, inputs, attrs); } -var squaredDifference = op({squaredDifference_}); +var squaredDifference = op({ squaredDifference_ }); function squeeze_(x, axis) { const $x = convertToTensor(x, "x", "squeeze"); return reshape($x, squeezeShape($x.shape, axis).newShape); } -var squeeze = op({squeeze_}); +var squeeze = op({ squeeze_ }); function stack_(tensors, axis = 0) { const $tensors = convertToTensorArray(tensors, "tensors", "stack", "string_or_numeric"); assert($tensors.length >= 1, () => "Pass at least one tensor to tf.stack"); @@ -12473,20 +12473,20 @@ function stack_(tensors, axis = 0) { assert(axis <= $tensors[0].rank, () => "Axis must be <= rank of the tensor"); } const inputs = $tensors; - const attrs = {axis}; + const attrs = { axis }; return ENGINE.runKernel(Pack, inputs, attrs); } -var stack = op({stack_}); +var stack = op({ stack_ }); function step_(x, alpha = 0) { const $x = convertToTensor(x, "x", "step"); - const inputs = {x: $x}; - const attrs = {alpha}; + const inputs = { x: $x }; + const attrs 
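// Editor's note - annotation, not part of the diff. softmax above normalizes
// along the last dimension only (other dims throw, as the preceding check
// shows), and split slices a tensor into pieces along an axis. A sketch,
// assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
tf.softmax(tf.tensor1d([1, 2, 3])).print();              // ~[0.090, 0.245, 0.665]
const [lo, hi] = tf.split(tf.tensor1d([1, 2, 3, 4]), 2); // two equal halves
lo.print(); // [1, 2]
// end note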
= { alpha }; return ENGINE.runKernel(Step, inputs, attrs); } -var step = op({step_}); +var step = op({ step_ }); function stridedSlice_(x, begin, end, strides, beginMask = 0, endMask = 0, ellipsisMask = 0, newAxisMask = 0, shrinkAxisMask = 0) { const $x = convertToTensor(x, "x", "stridedSlice"); - const inputs = {x: $x}; + const inputs = { x: $x }; const attrs = { begin, end, @@ -12499,13 +12499,13 @@ function stridedSlice_(x, begin, end, strides, beginMask = 0, endMask = 0, ellip }; return ENGINE.runKernel(StridedSlice, inputs, attrs); } -var stridedSlice = op({stridedSlice_}); +var stridedSlice = op({ stridedSlice_ }); function tan_(x) { const $x = convertToTensor(x, "x", "tan"); - const inputs = {x: $x}; + const inputs = { x: $x }; return ENGINE.runKernel(Tan, inputs); } -var tan = op({tan_}); +var tan = op({ tan_ }); function tensor1d(values, dtype) { assertNonNull(values); const inferredShape = inferShape(values, dtype); @@ -12581,12 +12581,12 @@ function topk_(x, k = 1, sorted = true) { if (k > lastDim) { throw new Error(`'k' passed to topk() must be <= the last dimension (${lastDim}) but got ${k}`); } - const inputs = {x: $x}; - const attrs = {k, sorted}; + const inputs = { x: $x }; + const attrs = { k, sorted }; const [values, indices] = ENGINE.runKernel(TopK, inputs, attrs); - return {values, indices}; + return { values, indices }; } -var topk = op({topk_}); +var topk = op({ topk_ }); function truncatedNormal_(shape, mean4 = 0, stdDev = 1, dtype, seed) { if (dtype != null && dtype === "bool") { throw new Error(`Unsupported data type $ { dtype }`); @@ -12598,33 +12598,33 @@ function truncatedNormal_(shape, mean4 = 0, stdDev = 1, dtype, seed) { } return res.toTensor(); } -var truncatedNormal = op({truncatedNormal_}); +var truncatedNormal = op({ truncatedNormal_ }); function unique_(x, axis = 0) { const $x = convertToTensor(x, "x", "unique", "string_or_numeric"); assert($x.rank > 0, () => "The input tensor must be at least 1D"); - const inputs = {x: $x}; - const attrs = {axis}; + const inputs = { x: $x }; + const attrs = { axis }; const [values, indices] = ENGINE.runKernel(Unique, inputs, attrs); - return {values, indices}; + return { values, indices }; } -var unique = op({unique_}); +var unique = op({ unique_ }); function unsortedSegmentSum_(x, segmentIds, numSegments) { const $x = convertToTensor(x, "x", "unsortedSegmentSum"); const $segmentIds = convertToTensor(segmentIds, "segmentIds", "unsortedSegmentSum", "int32"); assert(isInt(numSegments), () => "numSegments must be of dtype int"); - const inputs = {x: $x, segmentIds: $segmentIds}; - const attrs = {numSegments}; + const inputs = { x: $x, segmentIds: $segmentIds }; + const attrs = { numSegments }; return ENGINE.runKernel(UnsortedSegmentSum, inputs, attrs); } -var unsortedSegmentSum = op({unsortedSegmentSum_}); +var unsortedSegmentSum = op({ unsortedSegmentSum_ }); function unstack_(x, axis = 0) { const $x = convertToTensor(x, "x", "unstack", "string_or_numeric"); assert(axis >= -$x.shape.length && axis < $x.shape.length, () => `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`); - const inputs = {value: $x}; - const attrs = {axis}; + const inputs = { value: $x }; + const attrs = { axis }; return ENGINE.runKernel(Unpack, inputs, attrs); } -var unstack = op({unstack_}); +var unstack = op({ unstack_ }); function variable(initialValue, trainable = true, name, dtype) { return ENGINE.makeVariable(initialValue, trainable, name, dtype); } @@ -12734,7 +12734,7 @@ function normImpl(x, p2, axis = null) { } throw new 
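// Editor's note - annotation, not part of the diff. Two remarks on the
// context above. First, the error message in truncatedNormal_ reads
// `Unsupported data type $ { dtype }` - the spaces inside the interpolation
// mean the dtype is printed literally; the typo appears to be inherited from
// the upstream tfjs source and is untouched by this formatting-only diff.
// Second, a sketch of topk, assuming the standard @tensorflow/tfjs API:
const tf = require('@tensorflow/tfjs');
const {values, indices} = tf.topk(tf.tensor1d([1, 9, 4, 7]), 2);
values.print();  // [9, 7]
indices.print(); // [1, 3]
// end note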
Error(`Error in norm: invalid axis: ${axis}`); } -var norm = op({norm_}); +var norm = op({ norm_ }); function movingAverage_(v, x, decay, step5, zeroDebias = true) { const $v = convertToTensor(v, "v", "movingAverage"); const $x = convertToTensor(x, "x", "movingAverage"); @@ -12751,16 +12751,16 @@ function movingAverage_(v, x, decay, step5, zeroDebias = true) { } return add2($v, update); } -var movingAverage = op({movingAverage_}); +var movingAverage = op({ movingAverage_ }); function scatterND_(indices, updates, shape) { const $indices = convertToTensor(indices, "indices", "scatterND", "int32"); const $updates = convertToTensor(updates, "updates", "scatterND"); validateInput($updates, $indices, shape); - const inputs = {indices: $indices, updates: $updates}; - const attrs = {shape}; + const inputs = { indices: $indices, updates: $updates }; + const attrs = { shape }; return ENGINE.runKernel(ScatterNd, inputs, attrs); } -var scatterND = op({scatterND_}); +var scatterND = op({ scatterND_ }); function validateInput2(sparseIndices, sparseValues, outputShape, defaultValues) { if (sparseIndices.dtype !== "int32") { throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${sparseIndices.dtype}.`); @@ -12791,17 +12791,17 @@ function sparseToDense_(sparseIndices, sparseValues, outputShape, defaultValue = sparseValues: $sparseValues, defaultValue: $defaultValue }; - const attrs = {outputShape}; + const attrs = { outputShape }; return ENGINE.runKernel(SparseToDense, inputs, attrs); } -var sparseToDense = op({sparseToDense_}); +var sparseToDense = op({ sparseToDense_ }); function gatherND_(x, indices) { const $indices = convertToTensor(indices, "indices", "gatherND", "int32"); const $x = convertToTensor(x, "x", "gatherND"); - const inputs = {params: $x, indices: $indices}; + const inputs = { params: $x, indices: $indices }; return ENGINE.runKernel(GatherNd, inputs); } -var gatherND = op({gatherND_}); +var gatherND = op({ gatherND_ }); function getNoiseShape(x, noiseShape) { if (noiseShape == null) { return x.shape.slice(); @@ -12834,7 +12834,7 @@ function dropout_(x, rate, noiseShape, seed) { const multiplier = div(floor(add2(randomUniform($noiseShape, 0, 1, "float32", seed), keepProb)), keepProb); return mul($x, multiplier); } -var dropout = op({dropout_}); +var dropout = op({ dropout_ }); function enclosingPowerOfTwo(value) { return Math.floor(Math.pow(2, Math.ceil(Math.log(value) / Math.log(2)))); } @@ -12864,7 +12864,7 @@ async function inTopKAsync_(predictions, targets, k = 1) { const vals = predictionsVals.subarray(offset, offset + size); const valAndInd = []; for (let i = 0; i < vals.length; i++) { - valAndInd.push({value: vals[i], index: i}); + valAndInd.push({ value: vals[i], index: i }); } valAndInd.sort((a, b2) => b2.value - a.value); precision3[b] = 0; @@ -12909,11 +12909,11 @@ function conv2DBackpropFilter_(x, dy, filterShape, strides, pad3, dataFormat = " if (dimRoundingMode != null) { assert(isInt(pad3), () => `Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`); } - const inputs = {x: x4D, dy: dy4D}; - const attrs = {strides, pad: pad3, dataFormat, dimRoundingMode, filterShape}; + const inputs = { x: x4D, dy: dy4D }; + const attrs = { strides, pad: pad3, dataFormat, dimRoundingMode, filterShape }; return ENGINE.runKernel(Conv2DBackpropFilter, inputs, attrs); } -var conv2DBackpropFilter = op({conv2DBackpropFilter_}); +var conv2DBackpropFilter = op({ conv2DBackpropFilter_ }); function 
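// Editor's note - annotation, not part of the diff. dropout above zeroes a
// random `rate` fraction of elements and rescales the survivors by
// 1 / (1 - rate), so the expected sum is preserved. A sketch, assuming the
// standard @tensorflow/tfjs API (the output is random):
const tf = require('@tensorflow/tfjs');
tf.dropout(tf.ones([2, 4]), 0.5).print(); // each entry is either 0 or 2
// end note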
getFusedDyActivation(dy, y, activation2) { if (activation2 == null || activation2 === "linear") { return dy; @@ -12953,7 +12953,7 @@ var shouldFuse = (gradientDepth, activation2) => { const gradientMode = gradientDepth > 0; return !gradientMode || activation2 === "linear"; }; -function fusedConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha}) { +function fusedConv2d_({ x, filter, strides, pad: pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha }) { activation2 = activation2 || "linear"; if (shouldFuse(ENGINE.state.gradientDepth, activation2) === false) { let result = conv2d(x, filter, strides, pad3, dataFormat, dilations, dimRoundingMode); @@ -13024,7 +13024,7 @@ function fusedConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHWC", dilat if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } - return {value: res, gradFunc: grad2}; + return { value: res, gradFunc: grad2 }; }); return customOp(x4D, $filter); } else { @@ -13034,12 +13034,12 @@ function fusedConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHWC", dilat if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } - return {value: res, gradFunc: grad2}; + return { value: res, gradFunc: grad2 }; }); return customOpWithBias(x4D, $filter, $bias); } } -var conv2d2 = op({fusedConv2d_}); +var conv2d2 = op({ fusedConv2d_ }); function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, strides, pad3, dilations = [1, 1], dimRoundingMode) { let x4D = x; if (x.rank === 3) { @@ -13049,11 +13049,11 @@ function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, strides, pad3, if (dy4D.rank === 3) { dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); } - const inputs = {x: x4D, dy: dy4D}; - const attrs = {strides, pad: pad3, dimRoundingMode, dilations, filterShape}; + const inputs = { x: x4D, dy: dy4D }; + const attrs = { strides, pad: pad3, dimRoundingMode, dilations, filterShape }; return ENGINE.runKernel(DepthwiseConv2dNativeBackpropFilter, inputs, attrs); } -var depthwiseConv2dNativeBackpropFilter = op({depthwiseConv2dNativeBackpropFilter_}); +var depthwiseConv2dNativeBackpropFilter = op({ depthwiseConv2dNativeBackpropFilter_ }); function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, strides, pad3, dilations = [1, 1], dimRoundingMode) { let dy4D = dy; let reshapedTo4D = false; @@ -13061,16 +13061,16 @@ function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, strides, pad3, reshapedTo4D = true; dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); } - const inputs = {dy: dy4D, filter}; - const attrs = {strides, pad: pad3, dimRoundingMode, dilations, inputShape: xShape}; + const inputs = { dy: dy4D, filter }; + const attrs = { strides, pad: pad3, dimRoundingMode, dilations, inputShape: xShape }; const res = ENGINE.runKernel(DepthwiseConv2dNativeBackpropInput, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } -var depthwiseConv2dNativeBackpropInput = op({depthwiseConv2dNativeBackpropInput_}); -function fusedDepthwiseConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha}) { +var depthwiseConv2dNativeBackpropInput = op({ 
depthwiseConv2dNativeBackpropInput_ }); +function fusedDepthwiseConv2d_({ x, filter, strides, pad: pad3, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha }) { if (shouldFuse(ENGINE.state.gradientDepth, activation2) === false) { let result = depthwiseConv2d(x, filter, strides, pad3, dataFormat, dilations, dimRoundingMode); if (bias != null) { @@ -13141,7 +13141,7 @@ function fusedDepthwiseConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHW if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } - return {value: res, gradFunc: grad2}; + return { value: res, gradFunc: grad2 }; }); return customOp(x4D, $filter); } else { @@ -13151,13 +13151,13 @@ function fusedDepthwiseConv2d_({x, filter, strides, pad: pad3, dataFormat = "NHW if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } - return {value: res, gradFunc: grad2}; + return { value: res, gradFunc: grad2 }; }); return customOpWithBias(x4D, $filter, $bias); } } -var depthwiseConv2d2 = op({fusedDepthwiseConv2d_}); -function fusedMatMul_({a, b, transposeA = false, transposeB = false, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha}) { +var depthwiseConv2d2 = op({ fusedDepthwiseConv2d_ }); +function fusedMatMul_({ a, b, transposeA = false, transposeB = false, bias, activation: activation2 = "linear", preluActivationWeights, leakyreluAlpha }) { if (shouldFuse(ENGINE.state.gradientDepth, activation2) === false) { let result = matMul(a, b, transposeA, transposeB); if (bias != null) { @@ -13223,32 +13223,32 @@ function fusedMatMul_({a, b, transposeA = false, transposeB = false, bias, activ bias: $bias, preluActivationWeights: $preluActivationWeights }; - const attrs = {transposeA, transposeB, activation: activation2, leakyreluAlpha}; + const attrs = { transposeA, transposeB, activation: activation2, leakyreluAlpha }; if (bias == null) { const customOp = customGrad((a3D2, b3D2, save) => { const res = ENGINE.runKernel(_FusedMatMul, inputs, attrs); save([a3D2, b3D2, res]); - return {value: reshape(res, outShape), gradFunc: grad2}; + return { value: reshape(res, outShape), gradFunc: grad2 }; }); return customOp(a3D, b3D); } else { const customOpWithBias = customGrad((a3D2, b3D2, $bias2, save) => { const res = ENGINE.runKernel(_FusedMatMul, inputs, attrs); save([a3D2, b3D2, res, $bias2]); - return {value: reshape(res, outShape), gradFunc: grad2}; + return { value: reshape(res, outShape), gradFunc: grad2 }; }); return customOpWithBias(a3D, b3D, $bias); } } -var matMul2 = op({fusedMatMul_}); +var matMul2 = op({ fusedMatMul_ }); function hammingWindow_(windowLength) { return cosineWindow(windowLength, 0.54, 0.46); } -var hammingWindow = op({hammingWindow_}); +var hammingWindow = op({ hammingWindow_ }); function hannWindow_(windowLength) { return cosineWindow(windowLength, 0.5, 0.5); } -var hannWindow = op({hannWindow_}); +var hannWindow = op({ hannWindow_ }); function frame_(signal2, frameLength, frameStep, padEnd = false, padValue = 0) { let start = 0; const output = []; @@ -13272,7 +13272,7 @@ function frame_(signal2, frameLength, frameStep, padEnd = false, padValue = 0) { } return reshape(concat(output), [output.length, frameLength]); } -var frame = op({frame_}); +var frame = op({ frame_ }); function stft_(signal2, frameLength, frameStep, fftLength, windowFn = hannWindow) { if (fftLength == null) { fftLength = enclosingPowerOfTwo(frameLength); @@ -13281,7 +13281,7 @@ 
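// Editor's note - annotation, not part of the diff. fusedMatMul_ above folds
// an optional bias add and activation into a single kernel when no gradient
// is being recorded (see shouldFuse). It is exposed as tf.fused.matMul in the
// standard @tensorflow/tfjs API; a sketch:
const tf = require('@tensorflow/tfjs');
const a = tf.tensor2d([[1, 2], [3, 4]]);
const b = tf.tensor2d([[1, 0], [0, 1]]); // identity
const bias = tf.tensor1d([1, 1]);
tf.fused.matMul({a, b, bias, activation: 'relu'}).print(); // [[2, 3], [4, 5]]
// end note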
function stft_(signal2, frameLength, frameStep, fftLength, windowFn = hannWindow const windowedSignal = mul(framedSignal, windowFn(frameLength)); return rfft(windowedSignal, fftLength); } -var stft = op({stft_}); +var stft = op({ stft_ }); function cropAndResize_(image3, boxes, boxInd, cropSize, method = "bilinear", extrapolationValue = 0) { const $image = convertToTensor(image3, "image", "cropAndResize"); const $boxes = convertToTensor(boxes, "boxes", "cropAndResize", "float32"); @@ -13293,29 +13293,29 @@ function cropAndResize_(image3, boxes, boxInd, cropSize, method = "bilinear", ex assert(cropSize.length === 2, () => `Error in cropAndResize: cropSize must be of length 2, but got length ${cropSize.length}.`); assert(cropSize[0] >= 1 && cropSize[1] >= 1, () => `cropSize must be atleast [1,1], but was ${cropSize}`); assert(method === "bilinear" || method === "nearest", () => `method must be bilinear or nearest, but was ${method}`); - const inputs = {image: $image, boxes: $boxes, boxInd: $boxInd}; - const attrs = {method, extrapolationValue, cropSize}; + const inputs = { image: $image, boxes: $boxes, boxInd: $boxInd }; + const attrs = { method, extrapolationValue, cropSize }; const res = ENGINE.runKernel(CropAndResize, inputs, attrs); return res; } -var cropAndResize = op({cropAndResize_}); +var cropAndResize = op({ cropAndResize_ }); function flipLeftRight_(image3) { const $image = convertToTensor(image3, "image", "flipLeftRight", "float32"); assert($image.rank === 4, () => `Error in flipLeftRight: image must be rank 4,but got rank ${$image.rank}.`); - const inputs = {image: $image}; + const inputs = { image: $image }; const res = ENGINE.runKernel(FlipLeftRight, inputs, {}); return res; } -var flipLeftRight = op({flipLeftRight_}); +var flipLeftRight = op({ flipLeftRight_ }); function rotateWithOffset_(image3, radians, fillValue = 0, center = 0.5) { const $image = convertToTensor(image3, "image", "rotateWithOffset", "float32"); assert($image.rank === 4, () => `Error in rotateWithOffset: image must be rank 4,but got rank ${$image.rank}.`); - const inputs = {image: $image}; - const attrs = {radians, fillValue, center}; + const inputs = { image: $image }; + const attrs = { radians, fillValue, center }; const res = ENGINE.runKernel(RotateWithOffset, inputs, attrs); return res; } -var rotateWithOffset = op({rotateWithOffset_}); +var rotateWithOffset = op({ rotateWithOffset_ }); function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) { if (iouThreshold == null) { iouThreshold = 0.5; @@ -13334,7 +13334,7 @@ function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, score assert(scores.rank === 1, () => "scores must be a 1D tensor"); assert(scores.shape[0] === numBoxes, () => `scores has incompatible shape with boxes. 
Expected ${numBoxes}, but was ${scores.shape[0]}`); assert(0 <= softNmsSigma && softNmsSigma <= 1, () => `softNmsSigma must be in [0, 1], but was '${softNmsSigma}'`); - return {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma}; + return { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma }; } function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppression"); @@ -13343,10 +13343,10 @@ function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold = 0.5, sc maxOutputSize = inputs.maxOutputSize; iouThreshold = inputs.iouThreshold; scoreThreshold = inputs.scoreThreshold; - const attrs = {maxOutputSize, iouThreshold, scoreThreshold}; - return ENGINE.runKernel(NonMaxSuppressionV3, {boxes: $boxes, scores: $scores}, attrs); + const attrs = { maxOutputSize, iouThreshold, scoreThreshold }; + return ENGINE.runKernel(NonMaxSuppressionV3, { boxes: $boxes, scores: $scores }, attrs); } -var nonMaxSuppression = op({nonMaxSuppression_}); +var nonMaxSuppression = op({ nonMaxSuppression_ }); function binaryInsert(arr, element, comparator) { const index = binarySearch(arr, element, comparator); const insertionPoint = index < 0 ? -(index + 1) : index; @@ -13388,7 +13388,7 @@ function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scor const candidates = []; for (let i = 0; i < scores.length; i++) { if (scores[i] > scoreThreshold) { - candidates.push({score: scores[i], boxIndex: i, suppressBeginIndex: 0}); + candidates.push({ score: scores[i], boxIndex: i, suppressBeginIndex: 0 }); } } candidates.sort(ascendingComparator); @@ -13397,7 +13397,7 @@ function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scor const selectedScores = []; while (selectedIndices.length < maxOutputSize && candidates.length > 0) { const candidate = candidates.pop(); - const {score: originalScore, boxIndex, suppressBeginIndex} = candidate; + const { score: originalScore, boxIndex, suppressBeginIndex } = candidate; if (originalScore < scoreThreshold) { break; } @@ -13429,7 +13429,7 @@ function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scor selectedIndices.push(...new Array(elemsToPad).fill(0)); selectedScores.push(...new Array(elemsToPad).fill(0)); } - const result = {selectedIndices}; + const result = { selectedIndices }; if (returnScoresTensor) { result["selectedScores"] = selectedScores; } @@ -13478,7 +13478,7 @@ async function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshol const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); const boxesVals = boxesAndScores[0]; const scoresVals = boxesAndScores[1]; - const {selectedIndices} = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); + const { selectedIndices } = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); if ($boxes !== boxes) { $boxes.dispose(); } @@ -13496,12 +13496,12 @@ function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold iouThreshold = params.iouThreshold; scoreThreshold = params.scoreThreshold; softNmsSigma = params.softNmsSigma; - const inputs = {boxes: $boxes, scores: $scores}; - const attrs = {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma}; + const inputs = { boxes: $boxes, scores: $scores }; + const attrs = { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma }; const result = 
ENGINE.runKernel(NonMaxSuppressionV5, inputs, attrs); - return {selectedIndices: result[0], selectedScores: result[1]}; + return { selectedIndices: result[0], selectedScores: result[1] }; } -var nonMaxSuppressionWithScore = op({nonMaxSuppressionWithScore_}); +var nonMaxSuppressionWithScore = op({ nonMaxSuppressionWithScore_ }); async function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppressionAsync"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppressionAsync"); @@ -13513,7 +13513,7 @@ async function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, io const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); const boxesVals = boxesAndScores[0]; const scoresVals = boxesAndScores[1]; - const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); + const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); if ($boxes !== boxes) { $boxes.dispose(); } @@ -13533,7 +13533,7 @@ function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold = 0 const $maxOutputSize = params.maxOutputSize; const $iouThreshold = params.iouThreshold; const $scoreThreshold = params.scoreThreshold; - const inputs = {boxes: $boxes, scores: $scores}; + const inputs = { boxes: $boxes, scores: $scores }; const attrs = { maxOutputSize: $maxOutputSize, iouThreshold: $iouThreshold, @@ -13541,9 +13541,9 @@ function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold = 0 padToMaxOutputSize }; const result = ENGINE.runKernel(NonMaxSuppressionV4, inputs, attrs); - return {selectedIndices: result[0], validOutputs: result[1]}; + return { selectedIndices: result[0], validOutputs: result[1] }; } -var nonMaxSuppressionPadded = op({nonMaxSuppressionPadded_}); +var nonMaxSuppressionPadded = op({ nonMaxSuppressionPadded_ }); async function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppressionAsync"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppressionAsync"); @@ -13552,7 +13552,7 @@ async function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouTh const $iouThreshold = params.iouThreshold; const $scoreThreshold = params.scoreThreshold; const [boxesVals, scoresVals] = await Promise.all([$boxes.data(), $scores.data()]); - const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize); + const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize); if ($boxes !== boxes) { $boxes.dispose(); } @@ -13577,15 +13577,15 @@ function resizeBilinear_(images, size, alignCorners = false, halfPixelCenters = batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]); } const [] = size; - const inputs = {images: batchImages}; - const attrs = {alignCorners, halfPixelCenters, size}; + const inputs = { images: batchImages }; + const attrs = { alignCorners, halfPixelCenters, size }; const res = ENGINE.runKernel(ResizeBilinear, inputs, attrs); 
 if (reshapedTo4D) {
 return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
 }
 return res;
 }
-var resizeBilinear = op({resizeBilinear_});
+var resizeBilinear = op({ resizeBilinear_ });
 function resizeNearestNeighbor_(images, size, alignCorners = false, halfPixelCenters = false) {
 const $images = convertToTensor(images, "images", "resizeNearestNeighbor");
 assert($images.rank === 3 || $images.rank === 4, () => `Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${$images.rank}.`);
@@ -13599,15 +13599,15 @@ function resizeNearestNeighbor_(images, size, alignCorners = false, halfPixelCen
 batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);
 }
 const [] = size;
- const inputs = {images: batchImages};
- const attrs = {alignCorners, halfPixelCenters, size};
+ const inputs = { images: batchImages };
+ const attrs = { alignCorners, halfPixelCenters, size };
 const res = ENGINE.runKernel(ResizeNearestNeighbor, inputs, attrs);
 if (reshapedTo4D) {
 return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
 }
 return res;
 }
-var resizeNearestNeighbor = op({resizeNearestNeighbor_});
+var resizeNearestNeighbor = op({ resizeNearestNeighbor_ });
 function threshold_(image3, method = "binary", inverted = false, threshValue = 0.5) {
 const $image = convertToTensor(image3, "image", "threshold");
 const RED_INTENCITY_COEF = 0.2989;
@@ -13663,18 +13663,18 @@ function otsu(histogram, total) {
 }
 return bestThresh;
 }
-var threshold = op({threshold_});
+var threshold = op({ threshold_ });
 function transform_(image3, transforms, interpolation = "nearest", fillMode = "constant", fillValue = 0, outputShape) {
 const $image = convertToTensor(image3, "image", "transform", "float32");
 const $transforms = convertToTensor(transforms, "transforms", "transform", "float32");
 assert($image.rank === 4, () => `Error in transform: image must be rank 4,but got rank ${$image.rank}.`);
 assert($transforms.rank === 2 && ($transforms.shape[0] === $image.shape[0] || $transforms.shape[0] === 1) && $transforms.shape[1] === 8, () => `Error in transform: Input transform should be batch x 8 or 1 x 8`);
 assert(outputShape == null || outputShape.length === 2, () => `Error in transform: outputShape must be [height, width] or null, but got ${outputShape}.`);
- const inputs = {image: $image, transforms: $transforms};
- const attrs = {interpolation, fillMode, fillValue, outputShape};
+ const inputs = { image: $image, transforms: $transforms };
+ const attrs = { interpolation, fillMode, fillValue, outputShape };
 return ENGINE.runKernel(Transform, inputs, attrs);
 }
-var transform = op({transform_});
+var transform = op({ transform_ });
 function bandPart_(a, numLower, numUpper) {
 assert(numLower % 1 === 0, () => `bandPart(): numLower must be an integer, got ${numLower}.`);
 assert(numUpper % 1 === 0, () => `bandPart(): numUpper must be an integer, got ${numUpper}.`);
@@ -13701,7 +13701,7 @@ function bandPart_(a, numLower, numUpper) {
 const zero = zeros([M, N], $a.dtype);
 return reshape(stack(unstack(reshape($a, [-1, M, N])).map((mat) => where(inBand, mat, zero))), shape);
 }
-var bandPart = op({bandPart_});
+var bandPart = op({ bandPart_ });
 function gramSchmidt_(xs) {
 let inputIsTensor2D;
 if (Array.isArray(xs)) {
@@ -13736,7 +13736,7 @@ function gramSchmidt_(xs) {
 return ys;
 }
 }
-var gramSchmidt = op({gramSchmidt_});
+var gramSchmidt = op({ gramSchmidt_ });
 function qr_(x, fullMatrices = false) {
 assert(x.rank >= 2, () => `qr() requires input tensor to have a rank >= 2, but got rank ${x.rank}`);
 if (x.rank === 2) {
@@ -13818,7 +13818,7 @@ function qr2d(x, fullMatrices = false) {
 return [q, r];
 });
 }
-var qr = op({qr_});
+var qr = op({ qr_ });
 var Reduction;
 (function(Reduction2) {
 Reduction2[Reduction2["NONE"] = 0] = "NONE";
@@ -13859,7 +13859,7 @@ function computeWeightedLoss_(losses4, weights, reduction2 = Reduction.SUM_BY_NO
 }
 throw Error(`Unknown reduction: ${reduction2}`);
 }
-var computeWeightedLoss = op({computeWeightedLoss_});
+var computeWeightedLoss = op({ computeWeightedLoss_ });
 function absoluteDifference_(labels, predictions, weights, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 const $labels = convertToTensor(labels, "labels", "absoluteDifference");
 const $predictions = convertToTensor(predictions, "predictions", "absoluteDifference");
@@ -13871,7 +13871,7 @@ function absoluteDifference_(labels, predictions, weights, reduction2 = Reductio
 const losses4 = abs(sub($labels, $predictions));
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var absoluteDifference = op({absoluteDifference_});
+var absoluteDifference = op({ absoluteDifference_ });
 function cosineDistance_(labels, predictions, axis, weights, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 const $labels = convertToTensor(labels, "labels", "cosineDistance");
 const $predictions = convertToTensor(predictions, "predictions", "cosineDistance");
@@ -13884,7 +13884,7 @@ function cosineDistance_(labels, predictions, axis, weights, reduction2 = Reduct
 const losses4 = sub(one, sum2(mul($labels, $predictions), axis, true));
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var cosineDistance = op({cosineDistance_});
+var cosineDistance = op({ cosineDistance_ });
 function hingeLoss_(labels, predictions, weights, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 let $labels = convertToTensor(labels, "labels", "hingeLoss");
 const $predictions = convertToTensor(predictions, "predictions", "hingeLoss");
@@ -13898,7 +13898,7 @@ function hingeLoss_(labels, predictions, weights, reduction2 = Reduction.SUM_BY_
 const losses4 = relu(sub(one, mul($labels, $predictions)));
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var hingeLoss = op({hingeLoss_});
+var hingeLoss = op({ hingeLoss_ });
 function huberLoss_(labels, predictions, weights, delta = 1, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 const $labels = convertToTensor(labels, "labels", "huberLoss");
 const $predictions = convertToTensor(predictions, "predictions", "huberLoss");
@@ -13914,7 +13914,7 @@ function huberLoss_(labels, predictions, weights, delta = 1, reduction2 = Reduct
 const losses4 = add2(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear));
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var huberLoss = op({huberLoss_});
+var huberLoss = op({ huberLoss_ });
 function logLoss_(labels, predictions, weights, epsilon32 = 1e-7, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 const $labels = convertToTensor(labels, "labels", "logLoss");
 const $predictions = convertToTensor(predictions, "predictions", "logLoss");
@@ -13930,7 +13930,7 @@ function logLoss_(labels, predictions, weights, epsilon32 = 1e-7, reduction2 = R
 const losses4 = sub(l13, l23);
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var logLoss = op({logLoss_});
+var logLoss = op({ logLoss_ });
 function meanSquaredError_(labels, predictions, weights, reduction2 = Reduction.SUM_BY_NONZERO_WEIGHTS) {
 const $labels = convertToTensor(labels, "labels", "meanSquaredError");
 const $predictions = convertToTensor(predictions, "predictions", "meanSquaredError");
@@ -13942,7 +13942,7 @@ function meanSquaredError_(labels, predictions, weights, reduction2 = Reduction.
 const losses4 = squaredDifference($labels, $predictions);
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var meanSquaredError = op({meanSquaredError_});
+var meanSquaredError = op({ meanSquaredError_ });
 function sigmoidCrossEntropyWithLogits_(labels, logits) {
 const $labels = convertToTensor(labels, "labels", "sigmoidCrossEntropyWithLogits");
 const $logits = convertToTensor(logits, "logits", "sigmoidCrossEntropyWithLogits");
@@ -13969,7 +13969,7 @@ function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing
 const losses4 = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits);
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var sigmoidCrossEntropy = op({sigmoidCrossEntropy_});
+var sigmoidCrossEntropy = op({ sigmoidCrossEntropy_ });
 function softmaxCrossEntropyWithLogits_(labels, logits, dim = -1) {
 if (dim === -1) {
 dim = logits.rank - 1;
@@ -13992,7 +13992,7 @@ function softmaxCrossEntropyWithLogits_(labels, logits, dim = -1) {
 mul(reshape(dy, dyShape), sub(exp(logResult2), cast(labels3, "float32")))
 ];
 };
- return {value, gradFunc};
+ return { value, gradFunc };
 });
 return customOp(labels, logits);
 }
@@ -14013,7 +14013,7 @@ function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing = 0,
 const losses4 = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);
 return computeWeightedLoss(losses4, $weights, reduction2);
 }
-var softmaxCrossEntropy = op({softmaxCrossEntropy_});
+var softmaxCrossEntropy = op({ softmaxCrossEntropy_ });
 function sparseFillEmptyRows_(indices, values, denseShape, defaultValue) {
 const $indices = convertToTensor(indices, "indices", "sparseFillEmptyRows");
 const $values = convertToTensor(values, "values", "sparseFillEmptyRows");
@@ -14046,7 +14046,7 @@ function sparseFillEmptyRows_(indices, values, denseShape, defaultValue) {
 reverseIndexMap: result[3]
 };
 }
-var sparseFillEmptyRows = op({sparseFillEmptyRows_});
+var sparseFillEmptyRows = op({ sparseFillEmptyRows_ });
 function sparseReshape_(inputIndices, inputShape, newShape) {
 const $inputIndices = convertToTensor(inputIndices, "inputIndices", "sparseReshape");
 const $inputShape = convertToTensor(inputShape, "inputShape", "sparseReshape");
@@ -14067,9 +14067,9 @@ function sparseReshape_(inputIndices, inputShape, newShape) {
 newShape: $newShape
 };
 const result = ENGINE.runKernel(SparseReshape, inputs);
- return {outputIndices: result[0], outputShape: result[1]};
+ return { outputIndices: result[0], outputShape: result[1] };
 }
-var sparseReshape = op({sparseReshape_});
+var sparseReshape = op({ sparseReshape_ });
 var spectral = {
 fft,
 ifft,
@@ -14119,9 +14119,9 @@ var sparse = {
 };
 var Optimizer = class extends Serializable {
 minimize(f, returnCost = false, varList) {
- const {value, grads: grads2} = this.computeGradients(f, varList);
+ const { value, grads: grads2 } = this.computeGradients(f, varList);
 if (varList != null) {
- const gradArray = varList.map((v) => ({name: v.name, tensor: grads2[v.name]}));
+ const gradArray = varList.map((v) => ({ name: v.name, tensor: grads2[v.name] }));
 this.applyGradients(gradArray);
 } else {
 this.applyGradients(grads2);
@@ -14231,7 +14231,7 @@ var AdadeltaOptimizer = class extends Optimizer {
 }
 async getWeights() {
 const variables = [...this.accumulatedGrads, ...this.accumulatedUpdates];
- return [await this.saveIterations()].concat(variables.map((v) => ({name: v.originalName, tensor: v.variable})));
+ return [await this.saveIterations()].concat(variables.map((v) => ({ name: v.originalName, tensor: v.variable })));
 }
 async setWeights(weightValues) {
 weightValues = await this.extractIterations(weightValues);
@@ -14297,12 +14297,12 @@ var AdagradOptimizer = class extends Optimizer {
 }
 }
 async getWeights() {
- return [await this.saveIterations()].concat(this.accumulatedGrads.map((v) => ({name: v.originalName, tensor: v.variable})));
+ return [await this.saveIterations()].concat(this.accumulatedGrads.map((v) => ({ name: v.originalName, tensor: v.variable })));
 }
 async setWeights(weightValues) {
 weightValues = await this.extractIterations(weightValues);
 const trainable = false;
- this.accumulatedGrads = weightValues.map((v) => ({originalName: v.name, variable: v.tensor.variable(trainable)}));
+ this.accumulatedGrads = weightValues.map((v) => ({ originalName: v.name, variable: v.tensor.variable(trainable) }));
 }
 getConfig() {
 return {
@@ -14385,7 +14385,7 @@ var AdamOptimizer = class extends Optimizer {
 }
 async getWeights() {
 const variables = [...this.accumulatedFirstMoment, ...this.accumulatedSecondMoment];
- return [await this.saveIterations()].concat(variables.map((v) => ({name: v.originalName, tensor: v.variable})));
+ return [await this.saveIterations()].concat(variables.map((v) => ({ name: v.originalName, tensor: v.variable })));
 }
 async setWeights(weightValues) {
 weightValues = await this.extractIterations(weightValues);
@@ -14548,7 +14548,7 @@ var SGDOptimizer = class extends Optimizer {
 }
 }
 getConfig() {
- return {"learningRate": this.learningRate};
+ return { "learningRate": this.learningRate };
 }
 static fromConfig(cls, config) {
 return new cls(config["learningRate"]);
@@ -14605,12 +14605,12 @@ var MomentumOptimizer = class extends SGDOptimizer {
 this.momentum = momentum;
 }
 async getWeights() {
- return [await this.saveIterations()].concat(this.accumulations.map((v) => ({name: v.originalName, tensor: v.variable})));
+ return [await this.saveIterations()].concat(this.accumulations.map((v) => ({ name: v.originalName, tensor: v.variable })));
 }
 async setWeights(weightValues) {
 weightValues = await this.extractIterations(weightValues);
 const trainable = false;
- this.accumulations = weightValues.map((v) => ({originalName: v.name, variable: v.tensor.variable(trainable)}));
+ this.accumulations = weightValues.map((v) => ({ originalName: v.name, variable: v.tensor.variable(trainable) }));
 }
 getConfig() {
 return {
@@ -14712,7 +14712,7 @@ var RMSPropOptimizer = class extends Optimizer {
 if (this.centered) {
 variables.push(...this.accumulatedMeanGrads);
 }
- return [await this.saveIterations()].concat(variables.map((v) => ({name: v.originalName, tensor: v.variable})));
+ return [await this.saveIterations()].concat(variables.map((v) => ({ name: v.originalName, tensor: v.variable })));
 }
 async setWeights(weightValues) {
 weightValues = await this.extractIterations(weightValues);
@@ -15007,7 +15007,7 @@ function splitRealAndImagArrays(complex4) {
 real4[i / 2] = complex4[i];
 imag4[i / 2] = complex4[i + 1];
 }
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 function complexWithEvenIndex(complex4) {
 const len = Math.ceil(complex4.length / 4);
@@ -15017,7 +15017,7 @@ function complexWithEvenIndex(complex4) {
 real4[Math.floor(i / 4)] = complex4[i];
 imag4[Math.floor(i / 4)] = complex4[i + 1];
 }
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 function complexWithOddIndex(complex4) {
 const len = Math.floor(complex4.length / 4);
@@ -15027,12 +15027,12 @@ function complexWithOddIndex(complex4) {
 real4[Math.floor(i / 4)] = complex4[i];
 imag4[Math.floor(i / 4)] = complex4[i + 1];
 }
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 function getComplexWithIndex(complex4, index) {
 const real4 = complex4[index * 2];
 const imag4 = complex4[index * 2 + 1];
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 function assignToTypedArray(data, real4, imag4, index) {
 data[index * 2] = real4;
@@ -15046,13 +15046,13 @@ function exponents(n, inverse) {
 real4[i] = Math.cos(x);
 imag4[i] = Math.sin(x);
 }
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 function exponent(k, n, inverse) {
 const x = (inverse ? 2 : -2) * Math.PI * (k / n);
 const real4 = Math.cos(x);
 const imag4 = Math.sin(x);
- return {real: real4, imag: imag4};
+ return { real: real4, imag: imag4 };
 }
 var ARROW = "->";
 var ARROW_REGEX = /->/g;
@@ -15108,7 +15108,7 @@ function decodeEinsumEquation(equation, numTensors) {
 for (let i = numOutDims; i < numDims; ++i) {
 summedDims.push(i);
 }
- return {allDims, summedDims, idDims};
+ return { allDims, summedDims, idDims };
 }
 function getEinsumPermutation(nDims, idDims) {
 let permutationIndices = new Array(nDims);
@@ -15123,7 +15123,7 @@ function getEinsumPermutation(nDims, idDims) {
 }
 }
 permutationIndices = permutationIndices.filter((d) => d !== -1);
- return {permutationIndices, expandDims: expandDims6};
+ return { permutationIndices, expandDims: expandDims6 };
 }
 function checkEinsumDimSizes(nDims, idDims, tensors) {
 const dimSizes = new Array(nDims);
@@ -15160,7 +15160,7 @@ function getEinsumComputePath(summedDims, idDims) {
 }
 }
 }
- return {path, steps};
+ return { path, steps };
 }
 function isIdentityPermutation(perm) {
 return perm.every((dim, index) => dim === index);
@@ -15276,7 +15276,7 @@ function collectGatherOpShapeInfo(x, indices, axis, batchDims) {
 outputShape.push(x.shape[i]);
 sliceSize *= x.shape[i];
 }
- return {batchSize, sliceSize, outerSize, dimSize, outputShape};
+ return { batchSize, sliceSize, outerSize, dimSize, outputShape };
 }
 function fromUint8ToStringArray(vals) {
 try {
@@ -15300,7 +15300,7 @@ var absGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(dy, step(cast(x, "float32"), -1))};
+ return { x: () => mul(dy, step(cast(x, "float32"), -1)) };
 }
 };
 var acosGradConfig = {
@@ -15352,7 +15352,7 @@ var addGradConfig = {
 }
 return reshape(res, b.shape);
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var addNGradConfig = {
@@ -15371,7 +15371,7 @@ var argMaxGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => zerosLike(x)};
+ return { x: () => zerosLike(x) };
 }
 };
 var argMinGradConfig = {
@@ -15379,7 +15379,7 @@ var argMinGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => zerosLike(x)};
+ return { x: () => zerosLike(x) };
 }
 };
 var asinGradConfig = {
@@ -15387,7 +15387,7 @@ var asinGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, sqrt(sub(scalar(1), square(cast(x, "float32")))))};
+ return { x: () => div(dy, sqrt(sub(scalar(1), square(cast(x, "float32"))))) };
 }
 };
 var asinhGradConfig = {
@@ -15427,7 +15427,7 @@ var atan2GradConfig = {
 }
 return reshape(res, b.shape);
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var atanGradConfig = {
@@ -15435,7 +15435,7 @@ var atanGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, add2(square(cast(x, "float32")), 1))};
+ return { x: () => div(dy, add2(square(cast(x, "float32")), 1)) };
 }
 };
 var atanhGradConfig = {
@@ -15443,7 +15443,7 @@ var atanhGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, sub(scalar(1), square(cast(x, "float32"))))};
+ return { x: () => div(dy, sub(scalar(1), square(cast(x, "float32")))) };
 }
 };
 function avgPool3dGrad_(dy, input2, filterSize, strides, pad3, dimRoundingMode) {
@@ -15468,21 +15468,21 @@ function avgPool3dGrad_(dy, input2, filterSize, strides, pad3, dimRoundingMode)
 if (dimRoundingMode != null) {
 assert(isInt(pad3), () => `Error in avgPool3dGrad: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`);
 }
- const inputs = {dy: dy5D, input: input5D};
- const attrs = {filterSize, strides, pad: pad3, dimRoundingMode};
+ const inputs = { dy: dy5D, input: input5D };
+ const attrs = { filterSize, strides, pad: pad3, dimRoundingMode };
 const res = ENGINE.runKernel(AvgPool3DGrad, inputs, attrs);
 if (reshapedTo5D) {
 return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
 }
 return res;
 }
-var avgPool3dGrad = op({avgPool3dGrad_});
+var avgPool3dGrad = op({ avgPool3dGrad_ });
 var avgPool3DGradConfig = {
 kernelName: AvgPool3D,
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs;
+ const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs;
 return {
 x: () => avgPool3dGrad(dy, x, filterSize, strides, pad3, dimRoundingMode)
 };
@@ -15502,22 +15502,22 @@ function avgPoolGrad_(dy, input2, filterSize, strides, pad3) {
 }
 assert(dy4D.rank === 4, () => `Error in avgPoolGrad: dy must be rank 4 but got rank ${dy4D.rank}.`);
 assert(input4D.rank === 4, () => `Error in avgPoolGrad: input must be rank 4 but got rank ${input4D.rank}.`);
- const inputs = {dy: dy4D, input: input4D};
- const attrs = {filterSize, strides, pad: pad3};
+ const inputs = { dy: dy4D, input: input4D };
+ const attrs = { filterSize, strides, pad: pad3 };
 const res = ENGINE.runKernel(AvgPoolGrad, inputs, attrs);
 if (reshapedTo4D) {
 return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]);
 }
 return res;
 }
-var avgPoolGrad = op({avgPoolGrad_});
+var avgPoolGrad = op({ avgPoolGrad_ });
 var avgPoolGradConfig = {
 kernelName: AvgPool,
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {filterSize, strides, pad: pad3} = attrs;
- return {x: () => avgPoolGrad(dy, x, filterSize, strides, pad3)};
+ const { filterSize, strides, pad: pad3 } = attrs;
+ return { x: () => avgPoolGrad(dy, x, filterSize, strides, pad3) };
 }
 };
 var batchMatMulGradConfig = {
@@ -15525,7 +15525,7 @@ var batchMatMulGradConfig = {
 inputsToSave: ["a", "b"],
 gradFunc: (dy, saved, attrs) => {
 const [a, b] = saved;
- const {transposeA, transposeB} = attrs;
+ const { transposeA, transposeB } = attrs;
 if (!transposeA && !transposeB) {
 return {
 a: () => matMul(dy, b, false, true),
@@ -15552,8 +15552,8 @@ var batchMatMulGradConfig = {
 var batchToSpaceNDGradConfig = {
 kernelName: BatchToSpaceND,
 gradFunc: (dy, saved, attrs) => {
- const {blockShape, crops} = attrs;
- return {x: () => spaceToBatchND(dy, blockShape, crops)};
+ const { blockShape, crops } = attrs;
+ return { x: () => spaceToBatchND(dy, blockShape, crops) };
 }
 };
 var broadcastToGradConfig = {
@@ -15576,19 +15576,19 @@ var broadcastToGradConfig = {
 axes.push(i);
 }
 }
- return {x: () => sum2(dy, axes, true)};
+ return { x: () => sum2(dy, axes, true) };
 }
 };
 var castGradConfig = {
 kernelName: Cast,
 gradFunc: (dy) => {
- return {x: () => dy.clone()};
+ return { x: () => dy.clone() };
 }
 };
 var ceilGradConfig = {
 kernelName: Ceil,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var clipByValueGradConfig = {
@@ -15596,7 +15596,7 @@ var clipByValueGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {clipValueMin, clipValueMax} = attrs;
+ const { clipValueMin, clipValueMax } = attrs;
 return {
 x: () => where(logicalAnd(greaterEqual(x, clipValueMin), lessEqual(x, clipValueMax)), dy, zerosLike(dy))
 };
@@ -15612,7 +15612,7 @@ var concatGradConfig = {
 saveAllInputs: true,
 gradFunc: (dy, saved, attrs) => {
 const shapes = saved.map((t) => t.shape);
- const {axis} = attrs;
+ const { axis } = attrs;
 const $axis = parseAxisParam(axis, saved[0].shape)[0];
 const sizeSplits = shapes.map((s) => s[$axis]);
 const derTensors = split(dy, sizeSplits, $axis);
@@ -15624,7 +15624,7 @@ var conv2DGradConfig = {
 inputsToSave: ["x", "filter"],
 gradFunc: (dy, saved, attrs) => {
 const [x4D, $filter] = saved;
- const {dilations, strides, pad: pad3, dataFormat} = attrs;
+ const { dilations, strides, pad: pad3, dataFormat } = attrs;
 assert(tupleValuesAreOne(dilations), () => `Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${dilations}'`);
 return {
 x: () => conv2DBackpropInput(x4D.shape, dy, $filter, strides, pad3, dataFormat),
@@ -15637,7 +15637,7 @@ var conv2DBackpropInputGradConfig = {
 inputsToSave: ["dy", "filter"],
 gradFunc: (ddx, saved, attrs) => {
 const [dy, filter] = saved;
- const {strides, pad: pad3, dataFormat, dimRoundingMode} = attrs;
+ const { strides, pad: pad3, dataFormat, dimRoundingMode } = attrs;
 return {
 dy: () => conv2d(ddx, filter, strides, pad3, dataFormat, 1, dimRoundingMode),
 filter: () => conv2DBackpropFilter(ddx, dy, filter.shape, strides, pad3, dataFormat, dimRoundingMode)
@@ -15658,16 +15658,16 @@ function conv3DBackpropFilter_(x, dy, filterShape, strides, pad3) {
 assert(filterShape.length === 5, () => `Error in conv3dDerFilter: filterShape must be length 5, but got ${filterShape}.`);
 assert(x5D.shape[4] === filterShape[3], () => `Error in conv3dDerFilter: depth of input ${x5D.shape[4]}) must match input depth in filter (${filterShape[3]}.`);
 assert(dy5D.shape[4] === filterShape[4], () => `Error in conv3dDerFilter: depth of dy (${dy5D.shape[4]}) must match output depth for filter (${filterShape[4]}).`);
- const inputs = {x: x5D, dy: dy5D};
- const attrs = {strides, pad: pad3, filterShape};
+ const inputs = { x: x5D, dy: dy5D };
+ const attrs = { strides, pad: pad3, filterShape };
 return ENGINE.runKernel(Conv3DBackpropFilterV2, inputs, attrs);
 }
-var conv3DBackpropFilter = op({conv3DBackpropFilter_});
+var conv3DBackpropFilter = op({ conv3DBackpropFilter_ });
 var conv3DGradConfig = {
 kernelName: Conv3D,
 inputsToSave: ["x", "filter"],
 gradFunc: (dy, saved, attrs) => {
- const {dilations, strides, pad: pad3} = attrs;
+ const { dilations, strides, pad: pad3 } = attrs;
 assert(tupleValuesAreOne(dilations), () => `Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${dilations}'`);
 const [x5D, $filter] = saved;
 return {
@@ -15681,7 +15681,7 @@ var cosGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(neg(sin(cast(x, "float32"))), dy)};
+ return { x: () => mul(neg(sin(cast(x, "float32"))), dy) };
 }
 };
 var coshGradConfig = {
@@ -15689,7 +15689,7 @@ var coshGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(sinh(cast(x, "float32")), dy)};
+ return { x: () => mul(sinh(cast(x, "float32")), dy) };
 }
 };
 var cumsumGradConfig = {
@@ -15697,7 +15697,7 @@ var cumsumGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {axis, exclusive, reverse: reverse5} = attrs;
+ const { axis, exclusive, reverse: reverse5 } = attrs;
 return {
 x: () => {
 const permutation = getAxesPermutation([axis], x.rank);
@@ -15714,7 +15714,7 @@ var depthwiseConv2dNativeGradConfig = {
 kernelName: DepthwiseConv2dNative,
 inputsToSave: ["x", "filter"],
 gradFunc: (dy, saved, attrs) => {
- const {dilations, strides, pad: pad3, dimRoundingMode} = attrs;
+ const { dilations, strides, pad: pad3, dimRoundingMode } = attrs;
 const $dilations = dilations == null ? [1, 1] : dilations;
 assert(tupleValuesAreOne($dilations), () => `Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${$dilations}'`);
 const [x, filter] = saved;
@@ -15736,8 +15736,8 @@ var dilation2dGradConfig = {
 inputsToSave: ["x", "filter"],
 gradFunc: (dy, saved, attrs) => {
 const [x, filter] = saved;
- const inputInputs = {x, filter, dy};
- const filterInputs = {x, filter, dy};
+ const inputInputs = { x, filter, dy };
+ const filterInputs = { x, filter, dy };
 return {
 x: () => ENGINE.runKernel(Dilation2DBackpropInput, inputInputs, attrs),
 filter: () => ENGINE.runKernel(Dilation2DBackpropFilter, filterInputs, attrs)
@@ -15749,8 +15749,8 @@ var eluGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved) => {
 const [y] = saved;
- const inputs = {dy, y};
- return {x: () => ENGINE.runKernel(EluGrad, inputs)};
+ const inputs = { dy, y };
+ return { x: () => ENGINE.runKernel(EluGrad, inputs) };
 }
 };
 var erfGradConfig = {
@@ -15759,7 +15759,7 @@ var erfGradConfig = {
 gradFunc: (dy, saved) => {
 const [x] = saved;
 const a = mul(exp(neg(square(x))), 2 / Math.sqrt(Math.PI));
- return {x: () => mul(dy, a)};
+ return { x: () => mul(dy, a) };
 }
 };
 var expGradConfig = {
@@ -15767,7 +15767,7 @@ var expGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved) => {
 const [y] = saved;
- return {x: () => mul(dy, y)};
+ return { x: () => mul(dy, y) };
 }
 };
 var expandDimsGradConfig = {
@@ -15775,7 +15775,7 @@ var expandDimsGradConfig = {
 inputsToSave: ["input"],
 gradFunc: (dy, saved) => {
 const [input2] = saved;
- return {input: () => reshape(dy, input2.shape)};
+ return { input: () => reshape(dy, input2.shape) };
 }
 };
 var expm1GradConfig = {
@@ -15783,13 +15783,13 @@ var expm1GradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(dy, exp(x))};
+ return { x: () => mul(dy, exp(x)) };
 }
 };
 var floorGradConfig = {
 kernelName: Floor,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var floorDivGradConfig = {
@@ -15815,14 +15815,14 @@ var floorDivGradConfig = {
 const tmp = square(b);
 return neg(div(res, cast(tmp, "float32")));
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var fusedBatchNormGradConfig = {
 kernelName: FusedBatchNorm,
 inputsToSave: ["x", "mean", "variance", "scale"],
 gradFunc: (dy, saved, attrs) => {
- const {varianceEpsilon} = attrs;
+ const { varianceEpsilon } = attrs;
 const [x, mean4, variance, scale22] = saved;
 const scaleValue = scale22 == null ? scalar(1) : scale22;
 const reductionAxes = getReductionAxes(mean4.shape, x.shape);
@@ -15887,7 +15887,7 @@ var gatherGradConfig = {
 inputsToSave: ["x", "indices"],
 gradFunc: (dy, saved, attrs) => {
 const [x, indices] = saved;
- const {axis} = attrs;
+ const { axis } = attrs;
 const parsedAxis = parseAxisParam(axis, x.shape)[0];
 const derX = () => {
 const paramsShape = x.shape;
@@ -15908,7 +15908,7 @@ var gatherGradConfig = {
 paramsGrad = transpose(paramsGrad, invertTransposeDims);
 return paramsGrad;
 };
- return {x: derX, indices: () => indices};
+ return { x: derX, indices: () => indices };
 }
 };
 function arrayRange(start, stop) {
@@ -15932,31 +15932,31 @@ var greaterEqualGradConfig = {
 inputsToSave: ["a", "b"],
 gradFunc: (dy, saved) => {
 const [a, b] = saved;
- return {a: () => zerosLike(a), b: () => zerosLike(b)};
+ return { a: () => zerosLike(a), b: () => zerosLike(b) };
 }
 };
 var identityGradConfig = {
 kernelName: Identity,
 gradFunc: (dy) => {
- return {x: () => cast(dy, "float32")};
+ return { x: () => cast(dy, "float32") };
 }
 };
 var isFiniteGradConfig = {
 kernelName: IsFinite,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var isInfGradConfig = {
 kernelName: IsInf,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var isNanGradConfig = {
 kernelName: IsNan,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var leakyReluGradConfig = {
@@ -15964,9 +15964,9 @@ var leakyReluGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {alpha} = attrs;
+ const { alpha } = attrs;
 const mask = greater(x, 0);
- return {x: () => where(mask, dy, mul(dy, alpha))};
+ return { x: () => where(mask, dy, mul(dy, alpha)) };
 }
 };
 var log1pGradConfig = {
@@ -15974,7 +15974,7 @@ var log1pGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, add2(x, 1))};
+ return { x: () => div(dy, add2(x, 1)) };
 }
 };
 var logGradConfig = {
@@ -15982,7 +15982,7 @@ var logGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, cast(x, "float32"))};
+ return { x: () => div(dy, cast(x, "float32")) };
 }
 };
 var logSoftmaxGradConfig = {
@@ -15991,7 +15991,7 @@ var logSoftmaxGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const [value] = saved;
- const {axis} = attrs;
+ const { axis } = attrs;
 return {
 logits: () => {
 const keepDims = true;
@@ -16002,18 +16002,18 @@ var logSoftmaxGradConfig = {
 }
 };
 function localResponseNormalizationBackprop_(x, y, dy, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5) {
- const inputs = {x, y, dy};
- const attrs = {depthRadius, bias, alpha, beta};
+ const inputs = { x, y, dy };
+ const attrs = { depthRadius, bias, alpha, beta };
 return ENGINE.runKernel(LRNGrad, inputs, attrs);
 }
-var localResponseNormalizationBackprop = op({localResponseNormalizationBackprop_});
+var localResponseNormalizationBackprop = op({ localResponseNormalizationBackprop_ });
 var lrnGradConfig = {
 kernelName: LRN,
 inputsToSave: ["x"],
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const [x, y] = saved;
- const {depthRadius, bias, alpha, beta} = attrs;
+ const { depthRadius, bias, alpha, beta } = attrs;
 return {
 x: () => localResponseNormalizationBackprop(x, y, dy, depthRadius, bias, alpha, beta)
 };
@@ -16039,7 +16039,7 @@ var maxGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const maxAttrs = attrs;
- const {reductionIndices} = maxAttrs;
+ const { reductionIndices } = maxAttrs;
 const x = saved[0];
 const y = saved[1];
 const origAxes = parseAxisParam(reductionIndices, x.shape);
@@ -16058,7 +16058,7 @@ var maximumGradConfig = {
 const [a, b] = saved;
 const derA = () => mul(dy, cast(greaterEqual(a, b), "float32"));
 const derB = () => mul(dy, cast(less(a, b), "float32"));
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 function maxPool3dGrad_(dy, input2, output, filterSize, strides, pad3, dimRoundingMode) {
@@ -16093,22 +16093,22 @@ function maxPool3dGrad_(dy, input2, output, filterSize, strides, pad3, dimRoundi
 if (dimRoundingMode != null) {
 assert(isInt(pad3), () => `Error in maxPool3dGrad: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`);
 }
- const inputs = {dy: dy5D, input: input5D, output: output5D};
- const attrs = {filterSize, strides, pad: pad3, dimRoundingMode};
+ const inputs = { dy: dy5D, input: input5D, output: output5D };
+ const attrs = { filterSize, strides, pad: pad3, dimRoundingMode };
 const res = ENGINE.runKernel(MaxPool3DGrad, inputs, attrs);
 if (reshapedTo5D) {
 return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]);
 }
 return res;
 }
-var maxPool3dGrad = op({maxPool3dGrad_});
+var maxPool3dGrad = op({ maxPool3dGrad_ });
 var maxPool3DGradConfig = {
 kernelName: MaxPool3D,
 inputsToSave: ["x"],
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const [x, y] = saved;
- const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs;
+ const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs;
 return {
 x: () => maxPool3dGrad(dy, x, y, filterSize, strides, pad3, dimRoundingMode)
 };
@@ -16124,18 +16124,18 @@ function maxPoolGrad_(dy, input2, output, filterSize, strides, pad3, dimRounding
 if (dimRoundingMode != null) {
 assert(isInt(pad3), () => `Error in maxPoolGrad: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad3}.`);
 }
- const inputs = {dy: $dy, input: $input, output: $output};
- const attrs = {filterSize, strides, pad: pad3, dimRoundingMode};
+ const inputs = { dy: $dy, input: $input, output: $output };
+ const attrs = { filterSize, strides, pad: pad3, dimRoundingMode };
 return ENGINE.runKernel(MaxPoolGrad, inputs, attrs);
 }
-var maxPoolGrad = op({maxPoolGrad_});
+var maxPoolGrad = op({ maxPoolGrad_ });
 var maxPoolGradConfig = {
 kernelName: MaxPool,
 inputsToSave: ["x"],
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const [x, y] = saved;
- const {filterSize, strides, pad: pad3} = attrs;
+ const { filterSize, strides, pad: pad3 } = attrs;
 return {
 x: () => maxPoolGrad(dy, x, y, filterSize, strides, pad3)
 };
@@ -16146,7 +16146,7 @@ var meanGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {axis} = attrs;
+ const { axis } = attrs;
 const axes = parseAxisParam(axis, x.shape);
 const shapes = computeOutAndReduceShapes(x.shape, axes);
 const reduceShape = shapes[1];
@@ -16160,7 +16160,7 @@ var meanGradConfig = {
 const res = div(mul(expandedDy, ones2(x.shape, "float32")), reduceSize);
 return res;
 };
- return {x: derX};
+ return { x: derX };
 }
 };
 var minGradConfig = {
@@ -16169,7 +16169,7 @@ var minGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const minAttrs = attrs;
- const {axis} = minAttrs;
+ const { axis } = minAttrs;
 const [x, y] = saved;
 const origAxes = parseAxisParam(axis, x.shape);
 const minGrad = gradForMinAndMax(dy, y, x, origAxes);
@@ -16187,7 +16187,7 @@ var minimumGradConfig = {
 const [a, b] = saved;
 const derA = () => mul(dy, cast(lessEqual(a, b), "float32"));
 const derB = () => mul(dy, cast(greater(a, b), "float32"));
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var mirrorPadGradConfig = {
@@ -16195,9 +16195,9 @@ var mirrorPadGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const x = saved[0];
- const {paddings} = attrs;
+ const { paddings } = attrs;
 const begin = paddings.map((p2) => p2[0]);
- return {x: () => slice(dy, begin, x.shape)};
+ return { x: () => slice(dy, begin, x.shape) };
 }
 };
 var modGradConfig = {
@@ -16221,7 +16221,7 @@ var modGradConfig = {
 }
 return res;
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var multiplyGradConfig = {
@@ -16246,13 +16246,13 @@ var multiplyGradConfig = {
 }
 return res;
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var negGradConfig = {
 kernelName: Neg,
 gradFunc: (dy) => {
- return {x: () => neg(dy)};
+ return { x: () => neg(dy) };
 }
 };
 var oneHotGradConfig = {
@@ -16260,20 +16260,20 @@ var oneHotGradConfig = {
 inputsToSave: ["indices"],
 gradFunc: (dy, saved) => {
 const indices = saved[0];
- return {indices: () => zeros(indices.shape, "float32")};
+ return { indices: () => zeros(indices.shape, "float32") };
 }
 };
 var onesLikeGradConfig = {
 kernelName: OnesLike,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var packGradConfig = {
 kernelName: Pack,
 saveAllInputs: true,
 gradFunc: (dy, saved, attrs) => {
- const {axis} = attrs;
+ const { axis } = attrs;
 const derTensors = unstack(dy, axis);
 return derTensors.map((t) => () => t);
 }
@@ -16283,9 +16283,9 @@ var padV2GradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const x = saved[0];
- const {paddings} = attrs;
+ const { paddings } = attrs;
 const begin = paddings.map((p2) => p2[0]);
- return {x: () => slice(dy, begin, x.shape)};
+ return { x: () => slice(dy, begin, x.shape) };
 }
 };
 var powGradConfig = {
@@ -16316,7 +16316,7 @@ var powGradConfig = {
 }
 return reshape(res, exp4.shape);
 };
- return {a: derBase, b: derExp};
+ return { a: derBase, b: derExp };
 }
 };
 var preluGradConfig = {
@@ -16361,7 +16361,7 @@ var divGradConfig = {
 const tmp = square(b);
 return neg(div(res, cast(tmp, "float32")));
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var reciprocalGradConfig = {
@@ -16369,7 +16369,7 @@ var reciprocalGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, neg(square(x)))};
+ return { x: () => div(dy, neg(square(x))) };
 }
 };
 var relu6GradConfig = {
@@ -16378,7 +16378,7 @@ var relu6GradConfig = {
 gradFunc: (dy, saved) => {
 const [x] = saved;
 const mask = mul(lessEqual(x, 6), step(x));
- return {x: () => mul(dy, cast(mask, "float32"))};
+ return { x: () => mul(dy, cast(mask, "float32")) };
 }
 };
 var reluGradConfig = {
@@ -16386,7 +16386,7 @@ var reluGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(dy, cast(step(x), "float32"))};
+ return { x: () => mul(dy, cast(step(x), "float32")) };
 }
 };
 var reshapeGradConfig = {
@@ -16394,7 +16394,7 @@ var reshapeGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => reshape(dy, x.shape)};
+ return { x: () => reshape(dy, x.shape) };
 }
 };
 var resizeBilinearGradConfig = {
@@ -16402,9 +16402,9 @@ var resizeBilinearGradConfig = {
 inputsToSave: ["images"],
 gradFunc: (dy, saved, attrs) => {
 const [images] = saved;
- const inputs = {dy, images};
+ const inputs = { dy, images };
 const imagesDer = () => ENGINE.runKernel(ResizeBilinearGrad, inputs, attrs);
- return {images: imagesDer};
+ return { images: imagesDer };
 }
 };
 var resizeNearestNeighborGradConfig = {
@@ -16412,23 +16412,23 @@ var resizeNearestNeighborGradConfig = {
 inputsToSave: ["images"],
 gradFunc: (dy, saved, attrs) => {
 const [images] = saved;
- const inputs = {dy, images};
+ const inputs = { dy, images };
 const imagesDer = () => ENGINE.runKernel(ResizeNearestNeighborGrad, inputs, attrs);
- return {images: imagesDer};
+ return { images: imagesDer };
 }
 };
 var reverseGradConfig = {
 kernelName: Reverse,
 gradFunc: (dy, saved, attrs) => {
- const {dims} = attrs;
+ const { dims } = attrs;
 const axes = parseAxisParam(dims, dy.shape);
- return {x: () => reverse(dy, axes)};
+ return { x: () => reverse(dy, axes) };
 }
 };
 var roundGradConfig = {
 kernelName: Round,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var rsqrtGradConfig = {
@@ -16436,7 +16436,7 @@ var rsqrtGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => neg(div(dy, mul(pow(x, 1.5), 2)))};
+ return { x: () => neg(div(dy, mul(pow(x, 1.5), 2))) };
 }
 };
 var selectGradConfig = {
@@ -16473,13 +16473,13 @@ var sigmoidGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved) => {
 const [y] = saved;
- return {x: () => mul(dy, mul(y, sub(scalar(1), y)))};
+ return { x: () => mul(dy, mul(y, sub(scalar(1), y))) };
 }
 };
 var signGradConfig = {
 kernelName: Sign,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var sinGradConfig = {
@@ -16487,7 +16487,7 @@ var sinGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(cos(cast(x, "float32")), dy)};
+ return { x: () => mul(cos(cast(x, "float32")), dy) };
 }
 };
 var sinhGradConfig = {
@@ -16495,7 +16495,7 @@ var sinhGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(cosh(cast(x, "float32")), dy)};
+ return { x: () => mul(cosh(cast(x, "float32")), dy) };
 }
 };
 var sliceGradConfig = {
@@ -16503,14 +16503,14 @@ var sliceGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {begin, size} = attrs;
+ const { begin, size } = attrs;
 const inputShape = x.shape;
 const [begin_, size_] = parseSliceParams(x, begin, size);
 const paddings = [];
 for (let i = 0; i < dy.rank; i++) {
 paddings.push([begin_[i], inputShape[i] - begin_[i] - size_[i]]);
 }
- return {x: () => pad(dy, paddings)};
+ return { x: () => pad(dy, paddings) };
 }
 };
 var softmaxGradConfig = {
@@ -16518,7 +16518,7 @@ var softmaxGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved, attrs) => {
 const [y] = saved;
- const {dim} = attrs;
+ const { dim } = attrs;
 const keepDims = true;
 const dyTimesY = mul(dy, y);
 return {
@@ -16531,21 +16531,21 @@ var softplusGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(dy, sigmoid(x))};
+ return { x: () => mul(dy, sigmoid(x)) };
 }
 };
 var spaceToBatchNDGradConfig = {
 kernelName: SpaceToBatchND,
 gradFunc: (dy, saved, attrs) => {
- const {blockShape, paddings} = attrs;
- return {x: () => batchToSpaceND(dy, blockShape, paddings)};
+ const { blockShape, paddings } = attrs;
+ return { x: () => batchToSpaceND(dy, blockShape, paddings) };
 }
 };
 var splitVGradConfig = {
 kernelName: SplitV,
 gradFunc: (dy, saved, attrs) => {
- const {axis} = attrs;
- return {x: () => concat(dy, axis)};
+ const { axis } = attrs;
+ return { x: () => concat(dy, axis) };
 }
 };
 var sqrtGradConfig = {
@@ -16553,7 +16553,7 @@ var sqrtGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, mul(sqrt(cast(x, "float32")), 2))};
+ return { x: () => div(dy, mul(sqrt(cast(x, "float32")), 2)) };
 }
 };
 var squareGradConfig = {
@@ -16561,7 +16561,7 @@ var squareGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => mul(dy, mul(cast(x, "float32"), 2))};
+ return { x: () => mul(dy, mul(cast(x, "float32"), 2)) };
 }
 };
 var squaredDifferenceGradConfig = {
@@ -16572,13 +16572,13 @@ var squaredDifferenceGradConfig = {
 const two = scalar(2);
 const derA = () => mul(dy, mul(two, sub(a, b)));
 const derB = () => mul(dy, mul(two, sub(b, a)));
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var stepGradConfig = {
 kernelName: Step,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var subGradConfig = {
@@ -16603,7 +16603,7 @@ var subGradConfig = {
 }
 return reshape(neg(res), b.shape);
 };
- return {a: derA, b: derB};
+ return { a: derA, b: derB };
 }
 };
 var sumGradConfig = {
@@ -16612,14 +16612,14 @@ var sumGradConfig = {
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
 const expandedDyShape = x.shape.slice();
- const {axis} = attrs;
+ const { axis } = attrs;
 const axes = parseAxisParam(axis, x.shape);
 axes.forEach((axis2) => {
 expandedDyShape[axis2] = 1;
 });
 const expandedDy = reshape(dy, expandedDyShape);
 const derX = mul(expandedDy, ones2(x.shape, "float32"));
- return {x: () => derX};
+ return { x: () => derX };
 }
 };
 var tanGradConfig = {
@@ -16627,7 +16627,7 @@ var tanGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved) => {
 const [x] = saved;
- return {x: () => div(dy, square(cos(x)))};
+ return { x: () => div(dy, square(cos(x))) };
 }
 };
 var tanhGradConfig = {
@@ -16635,7 +16635,7 @@ var tanhGradConfig = {
 outputsToSave: [true],
 gradFunc: (dy, saved) => {
 const [y] = saved;
- return {x: () => mul(sub(scalar(1), square(y)), dy)};
+ return { x: () => mul(sub(scalar(1), square(y)), dy) };
 }
 };
 var tileGradConfig = {
@@ -16643,7 +16643,7 @@ var tileGradConfig = {
 inputsToSave: ["x"],
 gradFunc: (dy, saved, attrs) => {
 const [x] = saved;
- const {reps} = attrs;
+ const { reps } = attrs;
 const derX = () => {
 let xGrad = zerosLike(x);
 if (x.rank === 1) {
@@ -16687,24 +16687,24 @@ var tileGradConfig = {
 }
 return xGrad;
 };
- return {x: derX};
+ return { x: derX };
 }
 };
 var transposeGradConfig = {
 kernelName: Transpose,
 gradFunc: (dy, saved, attrs) => {
 const transposeAttrs = attrs;
- const {perm} = transposeAttrs;
+ const { perm } = transposeAttrs;
 const undoPerm = getUndoAxesPermutation(perm);
- return {x: () => transpose(dy, undoPerm)};
+ return { x: () => transpose(dy, undoPerm) };
 }
 };
 var unpackGradConfig = {
 kernelName: Unpack,
 gradFunc: (dy, saved, attrs) => {
 const unpackAttrs = attrs;
- const {axis} = unpackAttrs;
- return {value: () => stack(dy, axis)};
+ const { axis } = unpackAttrs;
+ return { value: () => stack(dy, axis) };
 }
 };
 var unsortedSegmentSumGradConfig = {
@@ -16715,7 +16715,7 @@ var unsortedSegmentSumGradConfig = {
 const derX = () => {
 return gatherDropNegatives(dy, segmentIds);
 };
- return {x: derX};
+ return { x: derX };
 }
 };
 function gatherDropNegatives(x, indices) {
@@ -16733,7 +16733,7 @@ function gatherDropNegatives(x, indices) {
 var zerosLikeGradConfig = {
 kernelName: ZerosLike,
 gradFunc: (dy) => {
- return {x: () => zerosLike(dy)};
+ return { x: () => zerosLike(dy) };
 }
 };
 var gradConfigs = [
@@ -17699,7 +17699,7 @@ var MaxNorm = class extends Constraint {
 });
 }
 getConfig() {
- return {maxValue: this.maxValue, axis: this.axis};
+ return { maxValue: this.maxValue, axis: this.axis };
 }
 };
 MaxNorm.className = "MaxNorm";
@@ -17714,7 +17714,7 @@ var UnitNorm = class extends Constraint {
 return tidy(() => div(w, add2(epsilon(), calcL2Norms(w, this.axis))));
 }
 getConfig() {
- return {axis: this.axis};
+ return { axis: this.axis };
 }
 };
 UnitNorm.className = "UnitNorm";
@@ -17774,7 +17774,7 @@ function getConstraint(identifier) {
 }
 if (typeof identifier === "string") {
 const className = identifier in CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP ? CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] : identifier;
- const config = {className, config: {}};
+ const config = { className, config: {} };
 return deserializeConstraint(config);
 } else if (identifier instanceof Constraint) {
 return identifier;
@@ -18107,7 +18107,7 @@ function dot2(a, b, activation2, bias) {
 const bLastDim = bShape.pop();
 const ySecondLastDim = bShape.pop();
 const yOtherDims = [...bShape, bLastDim];
- const perm = Array.from({length: b.rank}, (_, i) => {
+ const perm = Array.from({ length: b.rank }, (_, i) => {
 if (i === 0) {
 return b.rank - 2;
 } else if (i <= b.rank - 2) {
@@ -18289,7 +18289,7 @@ var RandomUniform = class extends Initializer {
 return randomUniform(shape, this.minval, this.maxval, dtype);
 }
 getConfig() {
- return {minval: this.minval, maxval: this.maxval, seed: this.seed};
+ return { minval: this.minval, maxval: this.maxval, seed: this.seed };
 }
 };
 RandomUniform.className = "RandomUniform";
@@ -18311,7 +18311,7 @@ var RandomNormal = class extends Initializer {
 return randomNormal2(shape, this.mean, this.stddev, dtype, this.seed);
 }
 getConfig() {
- return {mean: this.mean, stddev: this.stddev, seed: this.seed};
+ return { mean: this.mean, stddev: this.stddev, seed: this.seed };
 }
 };
 RandomNormal.className = "RandomNormal";
@@ -18333,7 +18333,7 @@ var TruncatedNormal = class extends Initializer {
 return truncatedNormal(shape, this.mean, this.stddev, dtype, this.seed);
 }
 getConfig() {
- return {mean: this.mean, stddev: this.stddev, seed: this.seed};
+ return { mean: this.mean, stddev: this.stddev, seed: this.seed };
 }
 };
 TruncatedNormal.className = "TruncatedNormal";
@@ -18353,7 +18353,7 @@ var Identity2 = class extends Initializer {
 });
 }
 getConfig() {
- return {gain: this.gain};
+ return { gain: this.gain };
 }
 };
 Identity2.className = "Identity";
@@ -19372,7 +19372,7 @@ var Layer = class extends serialization_exports.Serializable {
 }
 }
 getConfig() {
- const config = {name: this.name, trainable: this.trainable};
+ const config = { name: this.name, trainable: this.trainable };
 if (this.batchInputShape != null) {
 config["batchInputShape"] = this.batchInputShape;
 }
@@ -19402,7 +19402,7 @@ var Layer = class extends serialization_exports.Serializable {
 if (--this._refCount === 0) {
 numDisposedVariables = this.disposeWeights();
 }
- return {refCountAfterDispose: this._refCount, numDisposedVariables};
+ return { refCountAfterDispose: this._refCount, numDisposedVariables };
 }
 };
 function collectInputShape(inputTensors) {
@@ -19477,7 +19477,7 @@ var InputLayer = class extends Layer {
 const dtype = args.dtype || "float32";
 this.batchInputShape = batchInputShape;
 this.dtype = dtype;
- this.inputSpec = [{shape: batchInputShape}];
+ this.inputSpec = [{ shape: batchInputShape }];
 const inputTensor = new SymbolicTensor(this.dtype, this.batchInputShape, this, [], {}, this.name);
 inputTensor.nodeIndex = 0;
 inputTensor.tensorIndex = 0;
@@ -19498,7 +19498,7 @@ var InputLayer = class extends Layer {
 throw new ValueError(`Cannot pass any input to an InputLayer's apply() method. InputLayer name: ${this.name}`);
 }
 dispose() {
- return {refCountAfterDispose: this._refCount, numDisposedVariables: 0};
+ return { refCountAfterDispose: this._refCount, numDisposedVariables: 0 };
 }
 getConfig() {
 return {
@@ -19915,7 +19915,7 @@ function configureCallbacks(callbacks2, verbose, epochs, initialEpoch, numTrainS
 doValidation,
 metrics: callbackMetrics
 });
- return {callbackList, history};
+ return { callbackList, history };
 }
 function deserialize(config, customObjects = {}, fastWeightInit = false) {
 return deserializeKerasObject(config, serialization_exports.SerializationMap.getMap().classNameMap, customObjects, "layer", fastWeightInit);
 }
@@ -20655,7 +20655,7 @@ function getTopologicalSortAndRecipientCounts(fetches, feedDict) {
 } else {
 const visited = new Set();
 for (const fetch3 of fetches) {
- const {sorted, recipientMap} = getTopologicalSortAndRecipientCountsForOneFetch(fetch3, feedDict);
+ const { sorted, recipientMap } = getTopologicalSortAndRecipientCountsForOneFetch(fetch3, feedDict);
 for (const symbolicTensor of sorted) {
 if (!visited.has(symbolicTensor.name)) {
 finalSorted.push(symbolicTensor);
@@ -20720,7 +20720,7 @@ function getTopologicalSortAndRecipientCountsForOneFetch(fetch3, feedDict) {
 }
 }
 }
- return {sorted, recipientMap};
+ return { sorted, recipientMap };
 }
 function getNodeOutputs(fetch3) {
 let layerOutputs;
@@ -20968,7 +20968,7 @@ var Container = class extends Layer {
 }
 dispose() {
 this.assertNotDisposed();
- const result = {refCountAfterDispose: null, numDisposedVariables: 0};
+ const result = { refCountAfterDispose: null, numDisposedVariables: 0 };
 if (--this._refCount === 0) {
 for (const layer of this.layers) {
 result.numDisposedVariables += layer.dispose().numDisposedVariables;
@@ -21272,7 +21272,7 @@ var Container = class extends Layer {
 });
 }
 getConfig() {
- const config = {name: this.name};
+ const config = { name: this.name };
 const nodeConversionMap = this.buildNodeConversionMap(this.layers);
 const layerConfigs = [];
 for (const layer of this.layers) {
@@ -21438,7 +21438,7 @@ var Container = class extends Layer {
 const layerOutputTensors = layer.inboundNodes[nodeIndex].outputTensors;
 outputTensors.push(layerOutputTensors[tensorIndex]);
 }
- return new cls({inputs: inputTensors, outputs: outputTensors, name});
+ return new cls({ inputs: inputTensors, outputs: outputTensors, name });
 }
 get stateful() {
 if (this._stateful) {
@@ -21555,7 +21555,7 @@ function standardizeDataIteratorOutput(model2, iteratorOut) {
 for (let yIndex = 0; yIndex < flattenedYs.length; yIndex++) {
 util_exports.assert(flattenedYs[yIndex].shape[0] === batchSize, () => `Batch size mismatch: output ${model2.outputNames[yIndex]} has ${flattenedYs[yIndex].shape[0]}; expected ${batchSize} based on input ${model2.inputNames[0]}.`);
 }
- return {xs: flattenedXs, ys: flattenedYs};
+ return { xs: flattenedXs, ys: flattenedYs };
 }
 function flattenTensorOrArrayOrMap(inputOrOutput, names, values) {
 if (values instanceof Tensor) {
@@ -21578,7 +21578,7 @@ function standardizeTensorValidationData(data) {
 if (data.length === 3) {
 throw new NotImplementedError("Validation with sample weights is not implemented yet.");
 }
- return {xs: data[0], ys: data[1]};
+ return { xs: data[0], ys: data[1] };
 }
 async function fitDataset(model2, dataset, args) {
 const hasBatchesPerEpoch = args.batchesPerEpoch != null;
@@ -21614,7 +21614,7 @@ async function fitDataset(model2, dataset, args) {
 }
 const callbacks2 = standardizeCallbacks(args.callbacks, args.yieldEvery);
 const verbose = args.verbose == null ? 1 : args.verbose;
- const {callbackList, history} = configureCallbacks(callbacks2, verbose, args.epochs, null, null, getStepsPerEpoch(dataset, args), null, doValidation, callbackMetrics);
+ const { callbackList, history } = configureCallbacks(callbacks2, verbose, args.epochs, null, null, getStepsPerEpoch(dataset, args), null, doValidation, callbackMetrics);
 callbackList.setModel(model2);
 model2.history = history;
 await callbackList.onTrainBegin();
@@ -21636,7 +21636,7 @@ async function fitDataset(model2, dataset, args) {
 break;
 }
 if (iteratorOut.value != null) {
- const {xs, ys} = standardizeDataIteratorOutput(model2, iteratorOut.value);
+ const { xs, ys } = standardizeDataIteratorOutput(model2, iteratorOut.value);
 const batchLogs = {};
 batchLogs["batch"] = batchIndex;
 batchLogs["size"] = xs[0].shape[0];
@@ -21666,7 +21666,7 @@ async function fitDataset(model2, dataset, args) {
 if (doValidation) {
 let valOuts;
 if (isDatasetObject(args.validationData)) {
- valOuts = toList(await model2.evaluateDataset(args.validationData, {batches: args.validationBatches}));
+ valOuts = toList(await model2.evaluateDataset(args.validationData, { batches: args.validationBatches }));
 } else {
 valOuts = toList(model2.evaluate(valXs, valYs, {
 batchSize: args.validationBatchSize == null ?
DEFAULT_VALIDATION_BATCH_SIZE : args.validationBatchSize, @@ -21727,7 +21727,7 @@ async function evaluateDataset(model2, dataset, args) { const iteratorOut = await dataIterator.next(); outs = tidy(() => { if (iteratorOut.value) { - const {xs, ys} = standardizeDataIteratorOutput(model2, iteratorOut.value); + const { xs, ys } = standardizeDataIteratorOutput(model2, iteratorOut.value); const xsAndYs = xs.concat(ys); const batchOuts = tidy(() => f(xsAndYs)); dispose(xsAndYs); @@ -21833,7 +21833,7 @@ async function fitLoop(model2, f, ins, outLabels, batchSize, epochs, verbose, ca if (verbose == null) { verbose = 1; } - const {callbackList, history} = configureCallbacks(callbacks2, verbose, epochs, initialEpoch, numTrainSamples, stepsPerEpoch, batchSize, doValidation, callbackMetrics); + const { callbackList, history } = configureCallbacks(callbacks2, verbose, epochs, initialEpoch, numTrainSamples, stepsPerEpoch, batchSize, doValidation, callbackMetrics); callbackList.setModel(model2); model2.history = history; await callbackList.onTrainBegin(); @@ -22491,10 +22491,10 @@ var LayersModel = class extends Container { const feeds = []; if (Array.isArray(insBatch)) { for (let i = 0; i < insBatch.length; ++i) { - feeds.push({key: this.inputs[i], value: insBatch[i]}); + feeds.push({ key: this.inputs[i], value: insBatch[i] }); } } else { - feeds.push({key: this.inputs[0], value: insBatch}); + feeds.push({ key: this.inputs[0], value: insBatch }); } const feedDict = new FeedDict(feeds); return execute(this.outputs, feedDict); @@ -22619,10 +22619,10 @@ var LayersModel = class extends Container { const totalLossFunction = () => { const feeds = []; for (let i = 0; i < this.inputs.length; ++i) { - feeds.push({key: this.inputs[i], value: inputs[i]}); + feeds.push({ key: this.inputs[i], value: inputs[i] }); } const feedDict = new FeedDict(feeds); - const outputs = execute(this.outputs, feedDict, {"training": true}); + const outputs = execute(this.outputs, feedDict, { "training": true }); let totalLoss; for (let i = 0; i < this.lossFunctions.length; ++i) { const lossFunction = this.lossFunctions[i]; @@ -22671,7 +22671,7 @@ var LayersModel = class extends Container { const targets = data.slice(this.inputs.length, this.inputs.length + this.outputs.length); const feeds = []; for (let i = 0; i < this.inputs.length; ++i) { - feeds.push({key: this.inputs[i], value: inputs[i]}); + feeds.push({ key: this.inputs[i], value: inputs[i] }); } const feedDict = new FeedDict(feeds); const outputs = execute(this.outputs, feedDict); @@ -22724,7 +22724,7 @@ var LayersModel = class extends Container { if (trainableOnly && !weights[i].trainable) { continue; } - namedWeights.push({name: weights[i].originalName, tensor: weightValues[i]}); + namedWeights.push({ name: weights[i].originalName, tensor: weightValues[i] }); } return namedWeights; } @@ -22832,7 +22832,7 @@ var LayersModel = class extends Container { metrics2[key] = toCamelCase(trainingConfig.metrics[key]); } } - this.compile({loss, metrics: metrics2, optimizer}); + this.compile({ loss, metrics: metrics2, optimizer }); } async save(handlerOrURL, config) { if (typeof handlerOrURL === "string") { @@ -22861,7 +22861,7 @@ var LayersModel = class extends Container { if (includeOptimizer && this.optimizer != null) { modelArtifacts.trainingConfig = this.getTrainingConfig(); const weightType = "optimizer"; - const {data: optimizerWeightData, specs: optimizerWeightSpecs} = await io_exports.encodeWeights(await this.optimizer.getWeights(), weightType); + const { data: 
optimizerWeightData, specs: optimizerWeightSpecs } = await io_exports.encodeWeights(await this.optimizer.getWeights(), weightType); weightDataAndSpecs.specs.push(...optimizerWeightSpecs); weightDataAndSpecs.data = io_exports.concatenateArrayBuffers([weightDataAndSpecs.data, optimizerWeightData]); } @@ -22890,7 +22890,7 @@ Functional.className = "Functional"; serialization_exports.registerClass(Functional); async function modelFromJSON(modelAndWeightsConfig, customObjects) { if (!("modelTopology" in modelAndWeightsConfig)) { - modelAndWeightsConfig = {modelTopology: modelAndWeightsConfig}; + modelAndWeightsConfig = { modelTopology: modelAndWeightsConfig }; } modelAndWeightsConfig = modelAndWeightsConfig; let modelTopology = modelAndWeightsConfig.modelTopology; @@ -22951,7 +22951,7 @@ async function loadLayersModelFromIOHandler(handler, customObjects, options) { if (artifacts.weightSpecs == null) { throw new ValueError("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed."); } - const {modelWeights, optimizerWeights} = decodeModelAndOptimizerWeights(artifacts.weightData, artifacts.weightSpecs); + const { modelWeights, optimizerWeights } = decodeModelAndOptimizerWeights(artifacts.weightData, artifacts.weightSpecs); model2.loadWeights(modelWeights, strict); if (model2.optimizer != null && optimizerWeights.length > 0) { await model2.optimizer.setWeights(optimizerWeights); @@ -22967,16 +22967,16 @@ function decodeModelAndOptimizerWeights(buffer2, specs) { const optimizerWeights = []; specs.forEach((spec) => { if (spec.group === "optimizer") { - optimizerWeights.push({name: spec.name, tensor: name2Tensor[spec.name]}); + optimizerWeights.push({ name: spec.name, tensor: name2Tensor[spec.name] }); } else { modelWeights[spec.name] = name2Tensor[spec.name]; } }); - return {modelWeights, optimizerWeights}; + return { modelWeights, optimizerWeights }; } var Sequential = class extends LayersModel { constructor(args) { - super({inputs: [], outputs: []}); + super({ inputs: [], outputs: [] }); args = args || {}; this.trainable = true; this.built = false; @@ -23225,7 +23225,7 @@ var Sequential = class extends LayersModel { dict["config"] = layer.getConfig(); layers.push(dict); } - return {name: this.name, layers}; + return { name: this.name, layers }; } }; Sequential.className = "Sequential"; @@ -23404,21 +23404,21 @@ var L1L2 = class extends Regularizer { }); } getConfig() { - return {"l1": this.l1, "l2": this.l2}; + return { "l1": this.l1, "l2": this.l2 }; } static fromConfig(cls, config) { - return new cls({l1: config["l1"], l2: config["l2"]}); + return new cls({ l1: config["l1"], l2: config["l2"] }); } }; L1L2.className = "L1L2"; serialization_exports.registerClass(L1L2); function l1(args) { assertObjectArgs(args); - return new L1L2({l1: args != null ? args.l1 : null, l2: 0}); + return new L1L2({ l1: args != null ? args.l1 : null, l2: 0 }); } function l2(args) { assertObjectArgs(args); - return new L1L2({l2: args != null ? args.l2 : null, l1: 0}); + return new L1L2({ l2: args != null ? args.l2 : null, l1: 0 }); } var REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP = { "l1l2": "L1L2" @@ -23435,7 +23435,7 @@ function getRegularizer(identifier) { } if (typeof identifier === "string") { const className = identifier in REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP ? 
REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] : identifier; - const config = {className, config: {}}; + const config = { className, config: {} }; return deserializeRegularizer(config); } else if (identifier instanceof Regularizer) { return identifier; @@ -23463,7 +23463,7 @@ var ReLU = class extends Layer { return inputShape; } getConfig() { - const config = {maxValue: this.maxValue}; + const config = { maxValue: this.maxValue }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -23488,7 +23488,7 @@ var LeakyReLU = class extends Layer { return inputShape; } getConfig() { - const config = {alpha: this.alpha}; + const config = { alpha: this.alpha }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -23576,7 +23576,7 @@ var ELU = class extends Layer { return inputShape; } getConfig() { - const config = {alpha: this.alpha}; + const config = { alpha: this.alpha }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -23601,7 +23601,7 @@ var ThresholdedReLU = class extends Layer { return inputShape; } getConfig() { - const config = {theta: this.theta}; + const config = { theta: this.theta }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -23627,7 +23627,7 @@ var Softmax3 = class extends Layer { return inputShape; } getConfig() { - const config = {axis: this.axis}; + const config = { axis: this.axis }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -23872,7 +23872,7 @@ var Conv = class extends BaseConv { if (this.useBias) { this.bias = this.addWeight("bias", [this.filters], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); } - this.inputSpec = [{ndim: this.rank + 2, axes: {[channelAxis]: inputDim}}]; + this.inputSpec = [{ ndim: this.rank + 2, axes: { [channelAxis]: inputDim } }]; this.built = true; } call(inputs, kwargs) { @@ -23976,7 +23976,7 @@ serialization_exports.registerClass(Conv3D2); var Conv2DTranspose = class extends Conv2D2 { constructor(args) { super(args); - this.inputSpec = [new InputSpec({ndim: 4})]; + this.inputSpec = [new InputSpec({ ndim: 4 })]; if (this.padding !== "same" && this.padding !== "valid") { throw new ValueError(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`); } @@ -23996,7 +23996,7 @@ var Conv2DTranspose = class extends Conv2D2 { if (this.useBias) { this.bias = this.addWeight("bias", [this.filters], "float32", this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); } - this.inputSpec = [new InputSpec({ndim: 4, axes: {[channelAxis]: inputDim}})]; + this.inputSpec = [new InputSpec({ ndim: 4, axes: { [channelAxis]: inputDim } })]; this.built = true; } call(inputs, kwargs) { @@ -24076,7 +24076,7 @@ serialization_exports.registerClass(Conv2DTranspose); var Conv3DTranspose = class extends Conv3D2 { constructor(args) { super(args); - this.inputSpec = [new InputSpec({ndim: 5})]; + this.inputSpec = [new InputSpec({ ndim: 5 })]; if (this.padding !== "same" && this.padding !== "valid") { throw new ValueError(`Conv3DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`); } @@ -24096,7 +24096,7 @@ var Conv3DTranspose = class extends Conv3D2 { if (this.useBias) { this.bias = this.addWeight("bias", [this.filters], "float32", this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); } - 
this.inputSpec = [new InputSpec({ndim: 5, axes: {[channelAxis]: inputDim}})]; + this.inputSpec = [new InputSpec({ ndim: 5, axes: { [channelAxis]: inputDim } })]; this.built = true; } call(inputs, kwargs) { @@ -24234,7 +24234,7 @@ var SeparableConv = class extends Conv { } else { this.bias = null; } - this.inputSpec = [new InputSpec({ndim: this.rank + 2, axes: {[channelAxis]: inputDim}})]; + this.inputSpec = [new InputSpec({ ndim: this.rank + 2, axes: { [channelAxis]: inputDim } })]; this.built = true; } call(inputs, kwargs) { @@ -24288,7 +24288,7 @@ var Conv1D = class extends Conv { constructor(args) { super(1, args); Conv1D.verifyArgs(args); - this.inputSpec = [{ndim: 3}]; + this.inputSpec = [{ ndim: 3 }]; } getConfig() { const config = super.getConfig(); @@ -24318,7 +24318,7 @@ var Cropping2D = class extends Layer { this.cropping = args.cropping; } this.dataFormat = args.dataFormat === void 0 ? "channelsLast" : args.dataFormat; - this.inputSpec = [{ndim: 4}]; + this.inputSpec = [{ ndim: 4 }]; } computeOutputShape(inputShape) { if (this.dataFormat === "channelsFirst") { @@ -24350,7 +24350,7 @@ var Cropping2D = class extends Layer { }); } getConfig() { - const config = {cropping: this.cropping, dataFormat: this.dataFormat}; + const config = { cropping: this.cropping, dataFormat: this.dataFormat }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -24362,7 +24362,7 @@ var UpSampling2D = class extends Layer { constructor(args) { super(args); this.DEFAULT_SIZE = [2, 2]; - this.inputSpec = [{ndim: 4}]; + this.inputSpec = [{ ndim: 4 }]; this.size = args.size == null ? this.DEFAULT_SIZE : args.size; this.dataFormat = args.dataFormat == null ? "channelsLast" : args.dataFormat; checkDataFormat(this.dataFormat); @@ -24398,7 +24398,7 @@ var UpSampling2D = class extends Layer { }); } getConfig() { - const config = {size: this.size, dataFormat: this.dataFormat}; + const config = { size: this.size, dataFormat: this.dataFormat }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -24519,7 +24519,7 @@ function standardizeArgs(inputs, initialState, constants, numConstants) { } initialState = toListOrNull(initialState); constants = toListOrNull(constants); - return {inputs, initialState, constants}; + return { inputs, initialState, constants }; } function rnn(stepFunction, inputs, initialStates, goBackwards = false, mask, constants, unroll = false, needPerStepOutputs = false) { return tidy(() => { @@ -24571,7 +24571,7 @@ function rnn(stepFunction, inputs, initialStates, goBackwards = false, mask, con const newStates = states.map((state, i) => { return stepOutputs[1][i].mul(stepMask).add(state.mul(negStepMask)); }); - return {output, newStates}; + return { output, newStates }; }); lastOutput = maskedOutputs.output; states = maskedOutputs.newStates; @@ -24595,7 +24595,7 @@ var RNN = class extends Layer { if (args.cell == null) { throw new ValueError("cell property is missing for the constructor of RNN."); } else if (Array.isArray(args.cell)) { - cell = new StackedRNNCells({cells: args.cell}); + cell = new StackedRNNCells({ cells: args.cell }); } else { cell = args.cell; } @@ -24609,7 +24609,7 @@ var RNN = class extends Layer { this._stateful = args.stateful == null ? false : args.stateful; this.unroll = args.unroll == null ? 
false : args.unroll; this.supportsMasking = true; - this.inputSpec = [new InputSpec({ndim: 3})]; + this.inputSpec = [new InputSpec({ ndim: 3 })]; this.stateSpec = null; this.states_ = null; this.numConstants = null; @@ -24692,7 +24692,7 @@ var RNN = class extends Layer { inputShape = inputShape; const batchSize = this.stateful ? inputShape[0] : null; const inputDim = inputShape.slice(2); - this.inputSpec[0] = new InputSpec({shape: [batchSize, null, ...inputDim]}); + this.inputSpec[0] = new InputSpec({ shape: [batchSize, null, ...inputDim] }); const stepInputShape = [inputShape[0]].concat(inputShape.slice(2)); if (constantShape != null) { throw new NotImplementedError("Constants support is not implemented in RNN yet."); @@ -24710,7 +24710,7 @@ var RNN = class extends Layer { throw new ValueError(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`); } } else { - this.stateSpec = stateSize.map((dim) => new InputSpec({shape: [null, dim]})); + this.stateSpec = stateSize.map((dim) => new InputSpec({ shape: [null, dim] })); } if (this.stateful) { this.resetStates(); @@ -24784,7 +24784,7 @@ var RNN = class extends Layer { additionalInputs = additionalInputs.concat(initialState); this.stateSpec = []; for (const state of initialState) { - this.stateSpec.push(new InputSpec({shape: state.shape})); + this.stateSpec.push(new InputSpec({ shape: state.shape })); } additionalSpecs = additionalSpecs.concat(this.stateSpec); } @@ -24826,7 +24826,7 @@ var RNN = class extends Layer { if (this.unroll) { console.warn("Ignoring unroll = true for RNN layer, due to imperative backend."); } - const cellCallKwargs = {training}; + const cellCallKwargs = { training }; const step5 = (inputs2, states2) => { const outputs2 = this.cell.call([inputs2].concat(states2), cellCallKwargs); return [outputs2[0], outputs2.slice(1)]; @@ -24900,7 +24900,7 @@ var RNN = class extends Layer { static fromConfig(cls, config, customObjects = {}) { const cellConfig = config["cell"]; const cell = deserialize(cellConfig, customObjects); - return new cls(Object.assign(config, {cell})); + return new cls(Object.assign(config, { cell })); } }; RNN.className = "RNN"; @@ -25033,7 +25033,7 @@ var SimpleRNN = class extends RNN { const mask = kwargs == null ? null : kwargs["mask"]; const training = kwargs == null ? null : kwargs["training"]; const initialState = kwargs == null ? null : kwargs["initialState"]; - return super.call(inputs, {mask, training, initialState}); + return super.call(inputs, { mask, training, initialState }); }); } static fromConfig(cls, config) { @@ -25190,7 +25190,7 @@ var GRU = class extends RNN { const mask = kwargs == null ? null : kwargs["mask"]; const training = kwargs == null ? null : kwargs["training"]; const initialState = kwargs == null ? null : kwargs["initialState"]; - return super.call(inputs, {mask, training, initialState}); + return super.call(inputs, { mask, training, initialState }); }); } static fromConfig(cls, config) { @@ -25363,7 +25363,7 @@ var LSTM = class extends RNN { const mask = kwargs == null ? null : kwargs["mask"]; const training = kwargs == null ? null : kwargs["training"]; const initialState = kwargs == null ? 
null : kwargs["initialState"]; - return super.call(inputs, {mask, training, initialState}); + return super.call(inputs, { mask, training, initialState }); }); } static fromConfig(cls, config) { @@ -25452,7 +25452,7 @@ var StackedRNNCells = class extends RNNCell { }; }; const cellConfigs = this.cells.map(getCellConfig); - const config = {"cells": cellConfigs}; + const config = { "cells": cellConfigs }; return Object.assign({}, baseConfig, config); } static fromConfig(cls, config, customObjects = {}) { @@ -25460,7 +25460,7 @@ var StackedRNNCells = class extends RNNCell { for (const cellConfig of config["cells"]) { cells.push(deserialize(cellConfig, customObjects)); } - return new cls({cells}); + return new cls({ cells }); } get trainableWeights() { if (!this.trainable) { @@ -25508,7 +25508,7 @@ var StackedRNNCells = class extends RNNCell { StackedRNNCells.className = "StackedRNNCells"; serialization_exports.registerClass(StackedRNNCells); function generateDropoutMask(args) { - const {ones: ones4, rate, training = false, count: count2 = 1} = args; + const { ones: ones4, rate, training = false, count: count2 = 1 } = args; const droppedInputs = () => dropout2(ones4(), rate); const createMask = () => inTrainPhase(droppedInputs, ones4, training); if (!count2 || count2 <= 1) { @@ -25538,7 +25538,7 @@ var ConvRNN2D = class extends RNN { throw new NotImplementedError("It is not possible at the moment to stack convolutional cells."); } super(args); - this.inputSpec = [new InputSpec({ndim: 5})]; + this.inputSpec = [new InputSpec({ ndim: 5 })]; } call(inputs, kwargs) { return tidy(() => { @@ -25556,7 +25556,7 @@ var ConvRNN2D = class extends RNN { const mask = kwargs == null ? null : kwargs["mask"]; const training = kwargs == null ? null : kwargs["training"]; const initialState = kwargs == null ? null : kwargs["initialState"]; - return super.call(inputs, {mask, training, initialState}); + return super.call(inputs, { mask, training, initialState }); }); } computeOutputShape(inputShape) { @@ -25571,7 +25571,7 @@ var ConvRNN2D = class extends RNN { } getInitialState(inputs) { return tidy(() => { - const {stateSize} = this.cell; + const { stateSize } = this.cell; const inputShape = inputs.shape; const outputShape = this.computeSingleOutputShape(inputShape); const stateShape = [outputShape[0], ...outputShape.slice(2)]; @@ -25636,7 +25636,7 @@ var ConvRNN2D = class extends RNN { }); } computeSingleOutputShape(inputShape) { - const {dataFormat, filters, kernelSize, padding, strides, dilationRate} = this.cell; + const { dataFormat, filters, kernelSize, padding, strides, dilationRate } = this.cell; const isChannelsFirst = dataFormat === "channelsFirst"; const h = inputShape[isChannelsFirst ? 3 : 2]; const w = inputShape[isChannelsFirst ? 
4 : 3]; @@ -25652,8 +25652,8 @@ var ConvRNN2D = class extends RNN { ConvRNN2D.className = "ConvRNN2D"; var ConvLSTM2DCell = class extends LSTMCell { constructor(args) { - const {filters, kernelSize, strides, padding, dataFormat, dilationRate} = args; - super(Object.assign({}, args, {units: filters})); + const { filters, kernelSize, strides, padding, dataFormat, dilationRate } = args; + super(Object.assign({}, args, { units: filters })); this.filters = filters; assertPositiveInteger(this.filters, "filters"); this.kernelSize = normalizeArray(kernelSize, 2, "kernelSize"); @@ -25762,7 +25762,7 @@ var ConvLSTM2DCell = class extends LSTMCell { }); } getConfig() { - const _a = super.getConfig(), {"units": _} = _a, baseConfig = __rest(_a, ["units"]); + const _a = super.getConfig(), { "units": _ } = _a, baseConfig = __rest(_a, ["units"]); const config = { filters: this.filters, kernelSize: this.kernelSize, @@ -25790,7 +25790,7 @@ serialization_exports.registerClass(ConvLSTM2DCell); var ConvLSTM2D = class extends ConvRNN2D { constructor(args) { const cell = new ConvLSTM2DCell(args); - super(Object.assign({}, args, {cell})); + super(Object.assign({}, args, { cell })); } static fromConfig(cls, config) { return new cls(config); @@ -25849,7 +25849,7 @@ serialization_exports.registerClass(Dropout); var SpatialDropout1D = class extends Dropout { constructor(args) { super(args); - this.inputSpec = [{ndim: 3}]; + this.inputSpec = [{ ndim: 3 }]; } getNoiseShape(input2) { const inputShape = input2.shape; @@ -25888,7 +25888,7 @@ var Dense = class extends Layer { this.biasRegularizer = getRegularizer(args.biasRegularizer); this.activityRegularizer = getRegularizer(args.activityRegularizer); this.supportsMasking = true; - this.inputSpec = [{minNDim: 2}]; + this.inputSpec = [{ minNDim: 2 }]; } build(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -25899,7 +25899,7 @@ var Dense = class extends Layer { this.bias = this.addWeight("bias", [this.units], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); } } - this.inputSpec = [{minNDim: 2, axes: {[-1]: inputLastDim}}]; + this.inputSpec = [{ minNDim: 2, axes: { [-1]: inputLastDim } }]; this.built = true; } computeOutputShape(inputShape) { @@ -25952,7 +25952,7 @@ var Flatten = class extends Layer { constructor(args) { args = args || {}; super(args); - this.inputSpec = [{minNDim: 3}]; + this.inputSpec = [{ minNDim: 3 }]; this.dataFormat = args.dataFormat; } computeOutputShape(inputShape) { @@ -26005,7 +26005,7 @@ var Activation2 = class extends Layer { }); } getConfig() { - const config = {activation: serializeActivation(this.activation)}; + const config = { activation: serializeActivation(this.activation) }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -26017,7 +26017,7 @@ var RepeatVector = class extends Layer { constructor(args) { super(args); this.n = args.n; - this.inputSpec = [{ndim: 2}]; + this.inputSpec = [{ ndim: 2 }]; } computeOutputShape(inputShape) { return [inputShape[0], this.n, inputShape[1]]; @@ -26129,7 +26129,7 @@ var Permute = class extends Layer { } this.dims = args.dims; this.dimsIncludingBatch = [0].concat(this.dims); - this.inputSpec = [new InputSpec({ndim: this.dims.length + 1})]; + this.inputSpec = [new InputSpec({ ndim: this.dims.length + 1 })]; } computeOutputShape(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -26168,7 +26168,7 @@ var Masking = class extends Layer { } getConfig() { const baseConfig = super.getConfig(); - const config = 
{maskValue: this.maskValue}; + const config = { maskValue: this.maskValue }; Object.assign(config, baseConfig); return config; } @@ -26827,7 +26827,7 @@ var GaussianNoise = class extends Layer { } getConfig() { const baseConfig = super.getConfig(); - const config = {stddev: this.stddev}; + const config = { stddev: this.stddev }; Object.assign(config, baseConfig); return config; } @@ -26854,7 +26854,7 @@ var GaussianDropout = class extends Layer { } getConfig() { const baseConfig = super.getConfig(); - const config = {rate: this.rate}; + const config = { rate: this.rate }; Object.assign(config, baseConfig); return config; } @@ -26890,7 +26890,7 @@ var AlphaDropout = class extends Layer { } getConfig() { const baseConfig = super.getConfig(); - const config = {rate: this.rate}; + const config = { rate: this.rate }; Object.assign(config, baseConfig); return config; } @@ -26996,7 +26996,7 @@ var BatchNormalization = class extends Layer { if (dim == null) { throw new ValueError(`Axis ${axis} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(inputShape)}.`); } - this.inputSpec = [new InputSpec({ndim: inputShape.length, axes: {[axis]: dim}})]; + this.inputSpec = [new InputSpec({ ndim: inputShape.length, axes: { [axis]: dim } })]; const shape = [dim]; if (this.scale) { this.gamma = this.addWeight("gamma", shape, null, this.gammaInitializer, this.gammaRegularizer, true, this.gammaConstraint); @@ -27144,7 +27144,7 @@ var LayerNormalization = class extends Layer { const nDims = inputShape.length; return tidy(() => { const keepDims = true; - let {mean: mean4, variance} = moments(input2, this.axis, keepDims); + let { mean: mean4, variance } = moments(input2, this.axis, keepDims); const broadcastShape = pyListRepeat(1, nDims); for (const dim of this.axis) { broadcastShape[dim] = inputShape[dim]; @@ -27254,7 +27254,7 @@ var ZeroPadding2D = class extends Layer { } this.padding = [heightPadding, widthPadding]; } - this.inputSpec = [new InputSpec({ndim: 4})]; + this.inputSpec = [new InputSpec({ ndim: 4 })]; } computeOutputShape(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -27391,7 +27391,7 @@ var Pooling1D = class extends Layer { assertPositiveInteger(this.strides, "strides"); this.padding = args.padding == null ? "valid" : args.padding; checkPaddingMode(this.padding); - this.inputSpec = [new InputSpec({ndim: 3})]; + this.inputSpec = [new InputSpec({ ndim: 3 })]; } computeOutputShape(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -27464,7 +27464,7 @@ var Pooling2D = class extends Layer { this.dataFormat = args.dataFormat == null ? "channelsLast" : args.dataFormat; checkDataFormat(this.dataFormat); checkPaddingMode(this.padding); - this.inputSpec = [new InputSpec({ndim: 4})]; + this.inputSpec = [new InputSpec({ ndim: 4 })]; } computeOutputShape(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -27543,7 +27543,7 @@ var Pooling3D = class extends Layer { this.dataFormat = args.dataFormat == null ? 
"channelsLast" : args.dataFormat; checkDataFormat(this.dataFormat); checkPaddingMode(this.padding); - this.inputSpec = [new InputSpec({ndim: 5})]; + this.inputSpec = [new InputSpec({ ndim: 5 })]; } computeOutputShape(inputShape) { inputShape = getExactlyOneShape(inputShape); @@ -27604,7 +27604,7 @@ serialization_exports.registerClass(AveragePooling3D); var GlobalPooling1D = class extends Layer { constructor(args) { super(args); - this.inputSpec = [new InputSpec({ndim: 3})]; + this.inputSpec = [new InputSpec({ ndim: 3 })]; } computeOutputShape(inputShape) { return [inputShape[0], inputShape[2]]; @@ -27644,7 +27644,7 @@ var GlobalPooling2D = class extends Layer { super(args); this.dataFormat = args.dataFormat == null ? "channelsLast" : args.dataFormat; checkDataFormat(this.dataFormat); - this.inputSpec = [new InputSpec({ndim: 4})]; + this.inputSpec = [new InputSpec({ ndim: 4 })]; } computeOutputShape(inputShape) { inputShape = inputShape; @@ -27658,7 +27658,7 @@ var GlobalPooling2D = class extends Layer { throw new NotImplementedError(); } getConfig() { - const config = {dataFormat: this.dataFormat}; + const config = { dataFormat: this.dataFormat }; const baseConfig = super.getConfig(); Object.assign(config, baseConfig); return config; @@ -27751,7 +27751,7 @@ var Wrapper = class extends Layer { const layerConfig = config["layer"]; const layer = deserialize(layerConfig, customObjects); delete config["layer"]; - const newConfig = {layer}; + const newConfig = { layer }; Object.assign(newConfig, config); return new cls(newConfig); } @@ -27766,7 +27766,7 @@ var TimeDistributed = class extends Wrapper { if (inputShape.length < 3) { throw new ValueError(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(inputShape)}`); } - this.inputSpec = [{shape: inputShape}]; + this.inputSpec = [{ shape: inputShape }]; const childInputShape = [inputShape[0]].concat(inputShape.slice(2)); if (!this.layer.built) { this.layer.build(childInputShape); @@ -27907,7 +27907,7 @@ var Bidirectional = class extends Wrapper { } kwargs["initialState"] = initialState; additionalInputs.push(...initialState); - const stateSpecs = initialState.map((state) => new InputSpec({shape: state.shape})); + const stateSpecs = initialState.map((state) => new InputSpec({ shape: state.shape })); this.forwardLayer.stateSpec = stateSpecs.slice(0, numStates / 2); this.backwardLayer.stateSpec = stateSpecs.slice(numStates / 2); additionalSpecs.push(...stateSpecs); @@ -27944,8 +27944,8 @@ var Bidirectional = class extends Wrapper { } else { const forwardState = initialState.slice(0, initialState.length / 2); const backwardState = initialState.slice(initialState.length / 2); - y = this.forwardLayer.call(inputs, Object.assign(kwargs, {initialState: forwardState})); - yRev = this.backwardLayer.call(inputs, Object.assign(kwargs, {initialState: backwardState})); + y = this.forwardLayer.call(inputs, Object.assign(kwargs, { initialState: forwardState })); + yRev = this.backwardLayer.call(inputs, Object.assign(kwargs, { initialState: backwardState })); } let states; if (this.returnState) { @@ -28443,7 +28443,7 @@ var EarlyStopping = class extends Callback { function earlyStopping(args) { return new EarlyStopping(args); } -var callbacks = {earlyStopping}; +var callbacks = { earlyStopping }; var DataType; (function(DataType2) { DataType2[DataType2["DT_INVALID"] = 0] = "DT_INVALID"; @@ -28580,38 +28580,38 @@ var json = [ "tfOpName": "Add", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": 
"tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "AddV2", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "AddN", "category": "arithmetic", - "inputs": [{"start": 0, "end": 0, "name": "tensors", "type": "tensors"}] + "inputs": [{ "start": 0, "end": 0, "name": "tensors", "type": "tensors" }] }, { "tfOpName": "BiasAdd", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, { "tfName": "data_format", "name": "dataFormat", @@ -28624,129 +28624,129 @@ var json = [ "tfOpName": "Sub", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "RealDiv", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Div", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "DivNoNan", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "FloorDiv", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] 
}, { "tfOpName": "Mul", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Maximum", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Minimum", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Pow", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "SquaredDifference", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Mod", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "FloorMod", "category": "arithmetic", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [{ "tfName": "T", @@ -28765,164 +28765,164 @@ var json2 = [ "tfOpName": "Abs", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Acos", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Asin", "category": "basic_math", 
"inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Atan", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Atan2", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "y", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "y", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Ceil", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "ClipByValue", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "clipValueMin", "type": "number"}, - {"start": 2, "name": "clipValueMax", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "clipValueMin", "type": "number" }, + { "start": 2, "name": "clipValueMax", "type": "number" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Complex", "category": "basic_math", "inputs": [ - {"start": 0, "name": "real", "type": "tensor"}, - {"start": 1, "name": "imag", "type": "tensor"} + { "start": 0, "name": "real", "type": "tensor" }, + { "start": 1, "name": "imag", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "ComplexAbs", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Cos", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Cosh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Elu", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": 
true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Exp", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Floor", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Log", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Imag", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "outputType", @@ -28935,20 +28935,20 @@ var json2 = [ "tfOpName": "Neg", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Real", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "outputType", @@ -28961,229 +28961,229 @@ var json2 = [ "tfOpName": "Prelu", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "alpha", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "alpha", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Relu", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Relu6", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Selu", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", 
"notSupported": true } ] }, { "tfOpName": "Sigmoid", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Sin", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Sinh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Sqrt", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Rsqrt", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Square", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Tan", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Tanh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Sign", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Round", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Expm1", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": 
"dtype", "notSupported": true } ] }, { "tfOpName": "Log1p", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Reciprocal", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Softplus", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Asinh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Acosh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Atanh", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Erf", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Prod", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axes", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axes", "type": "number[]" } ], "attrs": [ { @@ -29192,14 +29192,14 @@ var json2 = [ "type": "bool", "notSupported": true }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "LeakyRelu", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ { @@ -29208,14 +29208,14 @@ var json2 = [ "type": "number", "defaultValue": 0.2 }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "IsNan", "category": "basic_math", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [{ "tfName": "T", @@ -29234,100 +29234,100 @@ var json3 = [ "tfOpName": "EmptyTensorList", "category": "control", "inputs": [ - {"start": 0, "name": 
"elementShape", "type": "shape"}, - {"start": 1, "name": "maxNumElements", "type": "number"} + { "start": 0, "name": "elementShape", "type": "shape" }, + { "start": 1, "name": "maxNumElements", "type": "number" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "LoopCond", "category": "control", - "inputs": [{"start": 0, "name": "pred", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "pred", "type": "tensor" }] }, { "tfOpName": "Switch", "category": "control", "inputs": [ - {"start": 0, "name": "data", "type": "tensor"}, - {"start": 1, "name": "pred", "type": "tensor"} + { "start": 0, "name": "data", "type": "tensor" }, + { "start": 1, "name": "pred", "type": "tensor" } ] }, { "tfOpName": "Merge", "category": "control", - "inputs": [{"start": 0, "end": 0, "name": "tensors", "type": "tensors"}] + "inputs": [{ "start": 0, "end": 0, "name": "tensors", "type": "tensors" }] }, { "tfOpName": "Enter", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensor", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, - {"tfName": "frame_name", "name": "frameName", "type": "string"}, - {"tfName": "is_constant", "name": "isConstant", "type": "bool"} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, + { "tfName": "frame_name", "name": "frameName", "type": "string" }, + { "tfName": "is_constant", "name": "isConstant", "type": "bool" } ] }, { "tfOpName": "Exit", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensor", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "NextIteration", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensor", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "TensorArrayV3", "category": "control", "inputs": [ - {"start": 0, "name": "size", "type": "number"} + { "start": 0, "name": "size", "type": "number" } ], "attrs": [ - {"tfName": "dtype", "name": "dtype", "type": "dtype"}, - {"tfName": "element_shape", "name": "elementShape", "type": "shape"}, - {"tfName": "dynamic_size", "name": "dynamicSize", "type": "bool"}, - {"tfName": "clear_after_read", "name": "clearAfterRead", "type": "bool"}, + { "tfName": "dtype", "name": "dtype", "type": "dtype" }, + { "tfName": "element_shape", "name": "elementShape", "type": "shape" }, + { "tfName": "dynamic_size", "name": "dynamicSize", "type": "bool" }, + { "tfName": "clear_after_read", "name": "clearAfterRead", "type": "bool" }, { "tfName": "identical_element_shapes", "name": "identicalElementShapes", "type": "bool" }, - {"tfName": "tensor_array_name", "name": "name", "type": "string"} + { "tfName": "tensor_array_name", "name": "name", "type": "string" } ] }, { "tfOpName": "TensorArrayWriteV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "index", "type": "number"}, - {"start": 2, "name": "tensor", "type": "tensor"}, - {"start": 3, "name": "flowIn", "type": "number"} 
+ { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "index", "type": "number" }, + { "start": 2, "name": "tensor", "type": "tensor" }, + { "start": 3, "name": "flowIn", "type": "number" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "TensorArrayReadV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "index", "type": "number"}, - {"start": 2, "name": "flowIn", "type": "number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "index", "type": "number" }, + { "start": 2, "name": "flowIn", "type": "number" } ], "attrs": [{ "tfName": "dtype", @@ -29340,35 +29340,35 @@ var json3 = [ "tfOpName": "TensorArrayGatherV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "number[]"}, - {"start": 2, "name": "flowIn", "type": "number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "number[]" }, + { "start": 2, "name": "flowIn", "type": "number" } ], "attrs": [ - {"tfName": "dtype", "name": "dtype", "type": "dtype"}, - {"tfName": "element_shape", "name": "elementShape", "type": "shape"} + { "tfName": "dtype", "name": "dtype", "type": "dtype" }, + { "tfName": "element_shape", "name": "elementShape", "type": "shape" } ] }, { "tfOpName": "TensorArrayScatterV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "number[]"}, - {"start": 2, "name": "tensor", "type": "tensor"}, - {"start": 3, "name": "flowIn", "type": "number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "number[]" }, + { "start": 2, "name": "tensor", "type": "tensor" }, + { "start": 3, "name": "flowIn", "type": "number" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "TensorArrayConcatV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "flowIn", "type": "number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "flowIn", "type": "number" } ], "attrs": [ - {"tfName": "dtype", "name": "dtype", "type": "dtype"}, + { "tfName": "dtype", "name": "dtype", "type": "dtype" }, { "tfName": "element_shape_except0", "name": "elementShapeExcept0", @@ -29381,192 +29381,192 @@ var json3 = [ "tfOpName": "TensorArraySplitV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "tensor", "type": "tensor"}, - {"start": 2, "name": "lengths", "type": "number[]"}, - {"start": 3, "name": "flowIn", "type": "number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "tensor", "type": "tensor" }, + { "start": 2, "name": "lengths", "type": "number[]" }, + { "start": 3, "name": "flowIn", "type": "number" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "TensorArraySizeV3", "category": "control", "inputs": [ - {"start": 0, "name": "tensorArrayId", "type": "tensor"}, - {"start": 1, "name": "flowIn", "type": 
"number"} + { "start": 0, "name": "tensorArrayId", "type": "tensor" }, + { "start": 1, "name": "flowIn", "type": "number" } ] }, { "tfOpName": "TensorArrayCloseV3", "category": "control", - "inputs": [{"start": 0, "name": "tensorArrayId", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "tensorArrayId", "type": "tensor" }] }, { "tfOpName": "StatelessIf", "category": "control", "inputs": [ - {"start": 0, "name": "cond", "type": "tensor"}, - {"start": 1, "end": 0, "name": "args", "type": "tensors"} + { "start": 0, "name": "cond", "type": "tensor" }, + { "start": 1, "end": 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "then_branch", "name": "thenBranch", "type": "func"}, - {"tfName": "else_branch", "name": "elseBranch", "type": "func"} + { "tfName": "then_branch", "name": "thenBranch", "type": "func" }, + { "tfName": "else_branch", "name": "elseBranch", "type": "func" } ] }, { "tfOpName": "If", "category": "control", "inputs": [ - {"start": 0, "name": "cond", "type": "tensor"}, - {"start": 1, "end": 0, "name": "args", "type": "tensors"} + { "start": 0, "name": "cond", "type": "tensor" }, + { "start": 1, "end": 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "then_branch", "name": "thenBranch", "type": "func"}, - {"tfName": "else_branch", "name": "elseBranch", "type": "func"} + { "tfName": "then_branch", "name": "thenBranch", "type": "func" }, + { "tfName": "else_branch", "name": "elseBranch", "type": "func" } ] }, { "tfOpName": "StatelessWhile", "category": "control", "inputs": [ - {"start": 0, "end": 0, "name": "args", "type": "tensors"} + { "start": 0, "end": 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "cond", "name": "cond", "type": "func"}, - {"tfName": "body", "name": "body", "type": "func"} + { "tfName": "cond", "name": "cond", "type": "func" }, + { "tfName": "body", "name": "body", "type": "func" } ] }, { "tfOpName": "While", "category": "control", "inputs": [ - {"start": 0, "end": 0, "name": "args", "type": "tensors"} + { "start": 0, "end": 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "cond", "name": "cond", "type": "func"}, - {"tfName": "body", "name": "body", "type": "func"} + { "tfName": "cond", "name": "cond", "type": "func" }, + { "tfName": "body", "name": "body", "type": "func" } ] }, { "tfOpName": "TensorListScatter", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "number[]"}, - {"start": 2, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensor", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "number[]" }, + { "start": 2, "name": "elementShape", "type": "shape" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListScatterV2", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "number[]"}, - {"start": 2, "name": "elementShape", "type": "shape"}, - {"start": 3, "name": "numElements", "type": "number"} + { "start": 0, "name": "tensor", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "number[]" }, + { "start": 2, "name": "elementShape", "type": "shape" }, + { "start": 3, "name": "numElements", "type": "number" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": 
"elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListGather", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "number[]"}, - {"start": 2, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "number[]" }, + { "start": 2, "name": "elementShape", "type": "shape" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListGetItem", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "index", "type": "number"}, - {"start": 2, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "index", "type": "number" }, + { "start": 2, "name": "elementShape", "type": "shape" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListSetItem", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "index", "type": "number"}, - {"start": 2, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "index", "type": "number" }, + { "start": 2, "name": "tensor", "type": "tensor" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListReserve", "category": "control", "inputs": [ - {"start": 0, "name": "elementShape", "type": "shape"}, - {"start": 1, "name": "numElements", "type": "number"} + { "start": 0, "name": "elementShape", "type": "shape" }, + { "start": 1, "name": "numElements", "type": "number" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListFromTensor", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"}, - {"start": 1, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensor", "type": "tensor" }, + { "start": 1, "name": "elementShape", "type": "shape" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListStack", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "elementShape", "type": "shape" } ], "attrs": [ - {"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}, - {"tfName": "num_elements", "name": "numElements", "type": "dtype"} + { "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }, + { "tfName": "num_elements", "name": "numElements", "type": "dtype" } ] }, { "tfOpName": "TensorListSplit", "category": "control", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"}, - {"start": 1, "name": "elementShape", "type": "shape"}, - {"start": 2, "name": "lengths", "type": "number[]"} + { "start": 
0, "name": "tensor", "type": "tensor" }, + { "start": 1, "name": "elementShape", "type": "shape" }, + { "start": 2, "name": "lengths", "type": "number[]" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListConcat", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"} + { "start": 0, "name": "tensorListId", "type": "tensor" } ], "attrs": [ - {"tfName": "element_shape", "name": "elementShape", "type": "shape"}, - {"tfName": "element_dtype", "name": "elementDType", "type": "dtype"} + { "tfName": "element_shape", "name": "elementShape", "type": "shape" }, + { "tfName": "element_dtype", "name": "elementDType", "type": "dtype" } ] }, { "tfOpName": "TensorListPopBack", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "elementShape", "type": "shape"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "elementShape", "type": "shape" } ], - "attrs": [{"tfName": "element_dtype", "name": "elementDType", "type": "dtype"}] + "attrs": [{ "tfName": "element_dtype", "name": "elementDType", "type": "dtype" }] }, { "tfOpName": "TensorListPushBack", "category": "control", "inputs": [ - {"start": 0, "name": "tensorListId", "type": "tensor"}, - {"start": 1, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensorListId", "type": "tensor" }, + { "start": 1, "name": "tensor", "type": "tensor" } ], "attrs": [ - {"tfName": "element_dtype", "name": "elementDType", "type": "dtype"} + { "tfName": "element_dtype", "name": "elementDType", "type": "dtype" } ] } ]; @@ -29579,37 +29579,37 @@ var json4 = [ "tfOpName": "AvgPool", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "notSupported": true }, - {"tfName": "ksize", "name": "kernelSize", "type": "number[]"}, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "ksize", "name": "kernelSize", "type": "number[]" }, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "MaxPool", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "notSupported": true }, - {"tfName": "ksize", "name": "kernelSize", "type": "number[]"}, + { "tfName": "ksize", "name": "kernelSize", "type": "number[]" }, { "tfName": "explicit_paddings", "name": "explicitPaddings", @@ -29617,82 +29617,82 @@ var json4 = [ "defaultValue": [], "notSupported": true }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": 
"MaxPoolWithArgmax", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, - {"tfName": "ksize", "name": "kernelSize", "type": "number[]"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, + { "tfName": "ksize", "name": "kernelSize", "type": "number[]" }, { "tfName": "include_batch_in_index", "name": "includeBatchInIndex", "type": "bool" }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "AvgPool3D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "notSupported": true }, - {"tfName": "ksize", "name": "kernelSize", "type": "number[]"}, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "ksize", "name": "kernelSize", "type": "number[]" }, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "MaxPool3D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "notSupported": true }, - {"tfName": "ksize", "name": "kernelSize", "type": "number[]"}, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "ksize", "name": "kernelSize", "type": "number[]" }, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Conv1D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "stride", "name": "stride", "type": "number"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "stride", "name": "stride", "type": "number" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "defaultValue": "NWC" }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, { "tfName": "dilation", "name": "dilation", @@ -29705,14 +29705,14 @@ var json4 = [ "tfOpName": "Conv2D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": 
"dtype", "notSupported": true}, - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, - {"tfName": "useCudnnOnGpu", "name": "useCudnnOnGpu", "type": "bool"}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, + { "tfName": "useCudnnOnGpu", "name": "useCudnnOnGpu", "type": "bool" }, { "tfName": "data_format", "name": "dataFormat", @@ -29725,22 +29725,22 @@ var json4 = [ "type": "number[]", "defaultValue": [] }, - {"tfName": "dilations", "name": "dilations", "type": "number[]"} + { "tfName": "dilations", "name": "dilations", "type": "number[]" } ] }, { "tfOpName": "_FusedConv2D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"}, - {"start": 2, end: 0, "name": "args", "type": "tensors"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" }, + { "start": 2, end: 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "num_args", "name": "numArgs", "type": "number"}, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "num_args", "name": "numArgs", "type": "number" }, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "explicit_paddings", "name": "explicitPaddings", @@ -29788,13 +29788,13 @@ var json4 = [ "tfOpName": "Conv2DBackpropInput", "category": "convolution", "inputs": [ - {"start": 2, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"}, - {"start": 0, "name": "outputShape", "type": "number[]"} + { "start": 2, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" }, + { "start": 0, "name": "outputShape", "type": "number[]" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", @@ -29819,12 +29819,12 @@ var json4 = [ "tfOpName": "DepthwiseConv2d", "category": "convolution", "inputs": [ - {"start": 0, "name": "input", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { "start": 0, "name": "input", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", @@ -29837,19 +29837,19 @@ var json4 = [ "type": "number[]", "defaultValue": [] }, - {"tfName": "dilations", "name": "dilations", "type": "number[]"} + { "tfName": "dilations", "name": "dilations", "type": "number[]" } ] }, { "tfOpName": "DepthwiseConv2dNative", "category": "convolution", "inputs": [ - {"start": 0, "name": "input", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { 
"start": 0, "name": "input", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", @@ -29862,22 +29862,22 @@ var json4 = [ "type": "number[]", "defaultValue": [] }, - {"tfName": "dilations", "name": "dilations", "type": "number[]"} + { "tfName": "dilations", "name": "dilations", "type": "number[]" } ] }, { "tfOpName": "FusedDepthwiseConv2dNative", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"}, - {"start": 2, end: 0, "name": "args", "type": "tensors"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" }, + { "start": 2, end: 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "num_args", "name": "numArgs", "type": "number"}, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "num_args", "name": "numArgs", "type": "number" }, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", @@ -29908,32 +29908,32 @@ var json4 = [ "tfOpName": "Conv3D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"}, + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" }, { "tfName": "data_format", "name": "dataFormat", "type": "string", "defaultValue": "NHWC" }, - {"tfName": "dilations", "name": "dilations", "type": "number[]"} + { "tfName": "dilations", "name": "dilations", "type": "number[]" } ] }, { "tfOpName": "Dilation2D", "category": "convolution", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "filter", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "filter", "type": "tensor" } ], "attrs": [ - {"tfName": "strides", "name": "strides", "type": "number[]"}, - {"tfName": "rates", "name": "dilations", "type": "number[]"}, - {"tfName": "padding", "name": "pad", "type": "string"} + { "tfName": "strides", "name": "strides", "type": "number[]" }, + { "tfName": "rates", "name": "dilations", "type": "number[]" }, + { "tfName": "padding", "name": "pad", "type": "string" } ] } ]; @@ -29946,31 +29946,31 @@ var json5 = [ "tfOpName": "Fill", "category": "creation", "inputs": [ - {"start": 0, "name": "shape", "type": "number[]"}, - {"start": 1, "name": "value", "type": "number"} + { "start": 0, "name": "shape", "type": "number[]" }, + { "start": 1, "name": "value", "type": "number" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": 
"LinSpace", "category": "creation", "inputs": [ - {"start": 0, "name": "start", "type": "number"}, - {"start": 1, "name": "stop", "type": "number"}, - {"start": 2, "name": "num", "type": "number"} + { "start": 0, "name": "start", "type": "number" }, + { "start": 1, "name": "stop", "type": "number" }, + { "start": 2, "name": "num", "type": "number" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "OneHot", "category": "creation", "inputs": [ - {"start": 0, "name": "indices", "type": "tensor"}, - {"start": 1, "name": "depth", "type": "number"}, - {"start": 2, "name": "onValue", "type": "number", "defaultValue": 1}, - {"start": 3, "name": "offValue", "type": "number", "defaultValue": 0} + { "start": 0, "name": "indices", "type": "tensor" }, + { "start": 1, "name": "depth", "type": "number" }, + { "start": 2, "name": "onValue", "type": "number", "defaultValue": 1 }, + { "start": 3, "name": "offValue", "type": "number", "defaultValue": 0 } ], "attrs": [ { @@ -29979,30 +29979,30 @@ var json5 = [ "type": "number", "notSupported": true }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Ones", "category": "creation", "inputs": [ - {"start": 0, "name": "shape", "type": "number[]"} + { "start": 0, "name": "shape", "type": "number[]" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "OnesLike", "category": "creation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], - "attrs": [{"tfName": "dtype", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "dtype", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "RandomUniform", "category": "creation", "inputs": [ - {"start": 0, "name": "shape", "type": "number[]"} + { "start": 0, "name": "shape", "type": "number[]" } ], "attrs": [ { @@ -30017,8 +30017,8 @@ var json5 = [ "type": "number", "defaultValue": 1 }, - {"tfName": "dtype", "name": "dtype", "type": "dtype"}, - {"tfName": "seed", "name": "seed", "type": "number", "defaultValue": 0}, + { "tfName": "dtype", "name": "dtype", "type": "dtype" }, + { "tfName": "seed", "name": "seed", "type": "number", "defaultValue": 0 }, { "tfName": "seed2", "name": "seed2", @@ -30026,24 +30026,24 @@ var json5 = [ "defaultValue": 0, "notSupported": true }, - {"tfName": "T", "name": "T", "type": "number", "notSupported": true} + { "tfName": "T", "name": "T", "type": "number", "notSupported": true } ] }, { "tfOpName": "Range", "category": "creation", "inputs": [ - {"start": 0, "name": "start", "type": "number"}, - {"start": 1, "name": "stop", "type": "number"}, - {"start": 2, "name": "step", "type": "number", "defaultValue": 0} + { "start": 0, "name": "start", "type": "number" }, + { "start": 1, "name": "stop", "type": "number" }, + { "start": 2, "name": "step", "type": "number", "defaultValue": 0 } ], - "attrs": [{"tfName": "Tidx", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "Tidx", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "TruncatedNormal", "category": "creation", "inputs": [ - {"start": 0, "name": "shape", "type": "number[]"} + { "start": 0, "name": "shape", "type": "number[]" } ], "attrs": [ { @@ -30058,7 +30058,7 @@ var json5 = [ "type": "number", "defaultValue": 1 }, - 
{"tfName": "seed", "name": "seed", "type": "number"}, + { "tfName": "seed", "name": "seed", "type": "number" }, { "tfName": "seed2", "name": "seed2", @@ -30066,38 +30066,38 @@ var json5 = [ "defaultValue": 0, "notSupported": true }, - {"tfName": "dtype", "name": "dtype", "type": "dtype"}, - {"tfName": "T", "name": "T", "type": "number", "notSupported": true} + { "tfName": "dtype", "name": "dtype", "type": "dtype" }, + { "tfName": "T", "name": "T", "type": "number", "notSupported": true } ] }, { "tfOpName": "Zeros", "category": "creation", "inputs": [ - {"start": 0, "name": "shape", "type": "number[]"} + { "start": 0, "name": "shape", "type": "number[]" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "ZerosLike", "category": "creation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], - "attrs": [{"tfName": "T", "name": "dtype", "type": "dtype"}] + "attrs": [{ "tfName": "T", "name": "dtype", "type": "dtype" }] }, { "tfOpName": "Multinomial", "category": "creation", "inputs": [ - {"start": 0, "name": "logits", "type": "tensor"}, - {"start": 1, "name": "numSamples", "type": "number"} + { "start": 0, "name": "logits", "type": "tensor" }, + { "start": 1, "name": "numSamples", "type": "number" } ], "attrs": [ - {"tfName": "seed", "name": "seed", "type": "number"}, - {"tfName": "seed2", "name": "seed2", "type": "number"}, - {"tfName": "T", "name": "dtype", "type": "dtype"}, - {"tfName": "output_dtype", "name": "output_dtype", "type": "dtype"} + { "tfName": "seed", "name": "seed", "type": "number" }, + { "tfName": "seed2", "name": "seed2", "type": "number" }, + { "tfName": "T", "name": "dtype", "type": "dtype" }, + { "tfName": "output_dtype", "name": "output_dtype", "type": "dtype" } ] } ]; @@ -30110,35 +30110,35 @@ var json6 = [ "tfOpName": "NonMaxSuppressionV2", "category": "dynamic", "inputs": [ - {"start": 0, "name": "boxes", "type": "tensor"}, - {"start": 1, "name": "scores", "type": "tensor"}, - {"start": 2, "name": "maxOutputSize", "type": "number"}, - {"start": 3, "name": "iouThreshold", "type": "number"} + { "start": 0, "name": "boxes", "type": "tensor" }, + { "start": 1, "name": "scores", "type": "tensor" }, + { "start": 2, "name": "maxOutputSize", "type": "number" }, + { "start": 3, "name": "iouThreshold", "type": "number" } ] }, { "tfOpName": "NonMaxSuppressionV3", "category": "dynamic", "inputs": [ - {"start": 0, "name": "boxes", "type": "tensor"}, - {"start": 1, "name": "scores", "type": "tensor"}, - {"start": 2, "name": "maxOutputSize", "type": "number"}, - {"start": 3, "name": "iouThreshold", "type": "number"}, - {"start": 4, "name": "scoreThreshold", "type": "number"} + { "start": 0, "name": "boxes", "type": "tensor" }, + { "start": 1, "name": "scores", "type": "tensor" }, + { "start": 2, "name": "maxOutputSize", "type": "number" }, + { "start": 3, "name": "iouThreshold", "type": "number" }, + { "start": 4, "name": "scoreThreshold", "type": "number" } ] }, { "tfOpName": "NonMaxSuppressionV4", "category": "dynamic", "inputs": [ - {"start": 0, "name": "boxes", "type": "tensor"}, - {"start": 1, "name": "scores", "type": "tensor"}, - {"start": 2, "name": "maxOutputSize", "type": "number"}, - {"start": 3, "name": "iouThreshold", "type": "number"}, - {"start": 4, "name": "scoreThreshold", "type": "number"} + { "start": 0, "name": "boxes", "type": "tensor" }, + { "start": 1, "name": "scores", "type": "tensor" }, + { "start": 2, 
"name": "maxOutputSize", "type": "number" }, + { "start": 3, "name": "iouThreshold", "type": "number" }, + { "start": 4, "name": "scoreThreshold", "type": "number" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}, + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }, { "tfName": "T_threshold", "name": "threshold", @@ -30156,30 +30156,30 @@ var json6 = [ "tfOpName": "NonMaxSuppressionV5", "category": "dynamic", "inputs": [ - {"start": 0, "name": "boxes", "type": "tensor"}, - {"start": 1, "name": "scores", "type": "tensor"}, - {"start": 2, "name": "maxOutputSize", "type": "number"}, - {"start": 3, "name": "iouThreshold", "type": "number"}, - {"start": 4, "name": "scoreThreshold", "type": "number"}, - {"start": 5, "name": "softNmsSigma", "type": "number"} + { "start": 0, "name": "boxes", "type": "tensor" }, + { "start": 1, "name": "scores", "type": "tensor" }, + { "start": 2, "name": "maxOutputSize", "type": "number" }, + { "start": 3, "name": "iouThreshold", "type": "number" }, + { "start": 4, "name": "scoreThreshold", "type": "number" }, + { "start": 5, "name": "softNmsSigma", "type": "number" } ] }, { "tfOpName": "Where", "category": "dynamic", "inputs": [ - {"start": 0, "name": "condition", "type": "tensor"} + { "start": 0, "name": "condition", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "ListDiff", "category": "dynamic", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "y", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "y", "type": "tensor" } ], "attrs": [{ "tfName": "T", @@ -30198,24 +30198,24 @@ var json7 = [ "tfOpName": "TopKV2", "category": "evaluation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "k", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "k", "type": "number" } ], - "attrs": [{"tfName": "sorted", "name": "sorted", "type": "bool"}] + "attrs": [{ "tfName": "sorted", "name": "sorted", "type": "bool" }] }, { "tfOpName": "Unique", "category": "evaluation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ] }, { "tfOpName": "UniqueV2", "category": "evaluation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number" } ] } ]; @@ -30228,66 +30228,66 @@ var json8 = [ "tfOpName": "PlaceholderWithDefault", "category": "graph", "inputs": [ - {"start": 0, "name": "default", "type": "tensor"} + { "start": 0, "name": "default", "type": "tensor" } ], "attrs": [ - {"tfName": "shape", "name": "shape", "type": "shape"}, - {"tfName": "dtype", "name": "dtype", "type": "dtype"} + { "tfName": "shape", "name": "shape", "type": "shape" }, + { "tfName": "dtype", "name": "dtype", "type": "dtype" } ] }, { "tfOpName": "Placeholder", "category": "graph", "attrs": [ - {"tfName": "shape", "name": "shape", "type": "shape"}, - {"tfName": "dtype", "name": "dtype", "type": "dtype"} + { "tfName": "shape", "name": "shape", "type": "shape" }, + { "tfName": "dtype", "name": "dtype", "type": "dtype" } ] }, - {"tfOpName": "Const", "category": "graph"}, + { "tfOpName": "Const", "category": "graph" }, { "tfOpName": "Identity", "category": 
"graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "IdentityN", "category": "graph", - "inputs": [{"start": 0, "end": 0, "name": "x", "type": "tensors"}] + "inputs": [{ "start": 0, "end": 0, "name": "x", "type": "tensors" }] }, { "tfOpName": "Snapshot", "category": "graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "Rank", "category": "graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "Size", "category": "graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "Shape", "category": "graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "ShapeN", "category": "graph", - "inputs": [{"start": 0, "end": 0, "name": "x", "type": "tensors"}] + "inputs": [{ "start": 0, "end": 0, "name": "x", "type": "tensors" }] }, { "tfOpName": "Print", "category": "graph", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "data", "type": "tensors"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "data", "type": "tensors" } ], "attrs": [ - {"tfName": "message", "name": "message", "type": "string"}, + { "tfName": "message", "name": "message", "type": "string" }, { "tfName": "first_n", "name": "firstN", @@ -30302,21 +30302,21 @@ var json8 = [ } ] }, - {"tfOpName": "NoOp", "category": "graph", "inputs": []}, + { "tfOpName": "NoOp", "category": "graph", "inputs": [] }, { "tfOpName": "StopGradient", "category": "graph", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "FakeQuantWithMinMaxVars", "category": "graph", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "min", "name": "min", "type": "number"}, - {"tfName": "max", "name": "max", "type": "number"} + { "tfName": "min", "name": "min", "type": "number" }, + { "tfName": "max", "name": "max", "type": "number" } ] } ]; @@ -30330,14 +30330,14 @@ var json9 = [ "category": "hash_table", "inputs": [], "attrs": [ - {"tfName": "shared_name", "name": "sharedName", "type": "string"}, + { "tfName": "shared_name", "name": "sharedName", "type": "string" }, { "tfName": "use_node_name_sharing", "name": "useNodeNameSharing", "type": "bool" }, - {"tfName": "key_dtype", "name": "keyDType", "type": "dtype"}, - {"tfName": "value_dtype", "name": "valueDType", "type": "dtype"} + { "tfName": "key_dtype", "name": "keyDType", "type": "dtype" }, + { "tfName": "value_dtype", "name": "valueDType", "type": "dtype" } ] }, { @@ -30345,26 +30345,26 @@ var json9 = [ "category": "hash_table", "inputs": [], "attrs": [ - {"tfName": "shared_name", "name": "sharedName", "type": "string"}, + { "tfName": "shared_name", "name": "sharedName", "type": "string" }, { "tfName": "use_node_name_sharing", "name": "useNodeNameSharing", "type": "bool" }, - {"tfName": "key_dtype", "name": "keyDType", "type": "dtype"}, - {"tfName": "value_dtype", "name": "valueDType", "type": "dtype"} + { "tfName": "key_dtype", "name": "keyDType", "type": "dtype" }, + { "tfName": "value_dtype", "name": "valueDType", "type": "dtype" } ] }, { "tfOpName": 
"LookupTableImport", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"}, - {"start": 1, "name": "keys", "type": "tensor"}, - {"start": 2, "name": "values", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" }, + { "start": 1, "name": "keys", "type": "tensor" }, + { "start": 2, "name": "values", "type": "tensor" } ], "attrs": [ - {"tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true}, + { "tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "tOut", @@ -30377,12 +30377,12 @@ var json9 = [ "tfOpName": "LookupTableImportV2", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"}, - {"start": 1, "name": "keys", "type": "tensor"}, - {"start": 2, "name": "values", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" }, + { "start": 1, "name": "keys", "type": "tensor" }, + { "start": 2, "name": "values", "type": "tensor" } ], "attrs": [ - {"tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true}, + { "tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "tOut", @@ -30395,12 +30395,12 @@ var json9 = [ "tfOpName": "LookupTableFind", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"}, - {"start": 1, "name": "keys", "type": "tensor"}, - {"start": 2, "name": "defaultValue", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" }, + { "start": 1, "name": "keys", "type": "tensor" }, + { "start": 2, "name": "defaultValue", "type": "tensor" } ], "attrs": [ - {"tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true}, + { "tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "tOut", @@ -30413,12 +30413,12 @@ var json9 = [ "tfOpName": "LookupTableFindV2", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"}, - {"start": 1, "name": "keys", "type": "tensor"}, - {"start": 2, "name": "defaultValue", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" }, + { "start": 1, "name": "keys", "type": "tensor" }, + { "start": 2, "name": "defaultValue", "type": "tensor" } ], "attrs": [ - {"tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true}, + { "tfName": "Tin", "name": "tIn", "type": "dtype", "notSupported": true }, { "tfName": "Tout", "name": "tOut", @@ -30431,14 +30431,14 @@ var json9 = [ "tfOpName": "LookupTableSize", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" } ] }, { "tfOpName": "LookupTableSizeV2", "category": "hash_table", "inputs": [ - {"start": 0, "name": "tableHandle", "type": "tensor"} + { "start": 0, "name": "tableHandle", "type": "tensor" } ] } ]; @@ -30451,47 +30451,47 @@ var json10 = [ "tfOpName": "ResizeBilinear", "category": "image", "inputs": [ - {"start": 0, "name": "images", "type": "tensor"}, - {"start": 1, "name": "size", "type": "number[]"} + { "start": 0, "name": "images", "type": "tensor" }, + { "start": 1, "name": "size", "type": "number[]" } ], "attrs": [ - {"tfName": "align_corners", "name": "alignCorners", "type": "bool"}, + { "tfName": "align_corners", "name": "alignCorners", "type": "bool" }, { "tfName": "half_pixel_centers", "name": "halfPixelCenters", "type": "bool" }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": 
@@ -30451,47 +30451,47 @@ var json10 = [
 {
   "tfOpName": "ResizeBilinear",
   "category": "image",
   "inputs": [
-    {"start": 0, "name": "images", "type": "tensor"},
-    {"start": 1, "name": "size", "type": "number[]"}
+    { "start": 0, "name": "images", "type": "tensor" },
+    { "start": 1, "name": "size", "type": "number[]" }
   ],
   "attrs": [
-    {"tfName": "align_corners", "name": "alignCorners", "type": "bool"},
+    { "tfName": "align_corners", "name": "alignCorners", "type": "bool" },
     {
       "tfName": "half_pixel_centers",
       "name": "halfPixelCenters",
       "type": "bool"
     },
-    {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}
+    { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }
   ]
 },
 {
   "tfOpName": "ResizeNearestNeighbor",
   "category": "image",
   "inputs": [
-    {"start": 0, "name": "images", "type": "tensor"},
-    {"start": 1, "name": "size", "type": "number[]"}
+    { "start": 0, "name": "images", "type": "tensor" },
+    { "start": 1, "name": "size", "type": "number[]" }
   ],
   "attrs": [
-    {"tfName": "align_corners", "name": "alignCorners", "type": "bool"},
+    { "tfName": "align_corners", "name": "alignCorners", "type": "bool" },
     {
       "tfName": "half_pixel_centers",
       "name": "halfPixelCenters",
       "type": "bool"
     },
-    {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true}
+    { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true }
   ]
 },
 {
   "tfOpName": "CropAndResize",
   "category": "image",
   "inputs": [
-    {"start": 0, "name": "image", "type": "tensor"},
-    {"start": 1, "name": "boxes", "type": "tensor"},
-    {"start": 2, "name": "boxInd", "type": "tensor"},
-    {"start": 3, "name": "cropSize", "type": "number[]"}
+    { "start": 0, "name": "image", "type": "tensor" },
+    { "start": 1, "name": "boxes", "type": "tensor" },
+    { "start": 2, "name": "boxInd", "type": "tensor" },
+    { "start": 3, "name": "cropSize", "type": "number[]" }
   ],
   "attrs": [
-    {"tfName": "method", "name": "method", "type": "string"},
+    { "tfName": "method", "name": "method", "type": "string" },
     {
       "tfName": "extrapolation_value",
       "name": "extrapolationValue",
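The json10 image ops carry the resize flags (`alignCorners`, `halfPixelCenters`) and the crop geometry for `CropAndResize`. Their public kernel counterparts, assuming `@tensorflow/tfjs` as `tf`:

```js
const tf = require('@tensorflow/tfjs');

const img = tf.zeros([1, 8, 8, 3]); // one NHWC image
tf.image.resizeBilinear(img, [4, 4], /* alignCorners */ false).print();

// CropAndResize: normalized boxes, box-to-batch indices, output cropSize
const boxes = tf.tensor2d([[0, 0, 1, 1]]);
const boxInd = tf.tensor1d([0], 'int32');
tf.image.cropAndResize(img, boxes, boxInd, [2, 2]).print();
```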
"dtype", "notSupported": true } ] }, { "tfOpName": "LessEqual", "category": "logical", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "LogicalAnd", "category": "logical", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "LogicalNot", "category": "logical", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "LogicalOr", "category": "logical", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Select", "category": "logical", "inputs": [ - {"start": 0, "name": "condition", "type": "tensor"}, - {"start": 1, "name": "a", "type": "tensor"}, - {"start": 2, "name": "b", "type": "tensor"} + { "start": 0, "name": "condition", "type": "tensor" }, + { "start": 1, "name": "a", "type": "tensor" }, + { "start": 2, "name": "b", "type": "tensor" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "SelectV2", "category": "logical", "inputs": [ - {"start": 0, "name": "condition", "type": "tensor"}, - {"start": 1, "name": "a", "type": "tensor"}, - {"start": 2, "name": "b", "type": "tensor"} + { "start": 0, "name": "condition", "type": "tensor" }, + { "start": 1, "name": "a", "type": "tensor" }, + { "start": 2, "name": "b", "type": "tensor" } ], "attrs": [{ "tfName": "T", @@ -30640,12 +30640,12 @@ var json12 = [ "tfOpName": "_FusedMatMul", "category": "matrices", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"}, - {"start": 2, end: 0, "name": "args", "type": "tensors"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" }, + { "start": 2, end: 0, "name": "args", "type": "tensors" } ], "attrs": [ - {"tfName": "num_args", "name": "numArgs", "type": "number"}, + { "tfName": "num_args", "name": "numArgs", "type": "number" }, { "tfName": "fused_ops", "name": "fusedOps", @@ -30670,15 +30670,15 @@ var json12 = [ "type": "bool", "defaultValue": false }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "MatMul", "category": "matrices", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 
0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ { @@ -30693,15 +30693,15 @@ var json12 = [ "type": "bool", "defaultValue": false }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "BatchMatMul", "category": "matrices", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ { @@ -30716,15 +30716,15 @@ var json12 = [ "type": "bool", "defaultValue": false }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "BatchMatMulV2", "category": "matrices", "inputs": [ - {"start": 0, "name": "a", "type": "tensor"}, - {"start": 1, "name": "b", "type": "tensor"} + { "start": 0, "name": "a", "type": "tensor" }, + { "start": 1, "name": "b", "type": "tensor" } ], "attrs": [ { @@ -30739,28 +30739,28 @@ var json12 = [ "type": "bool", "defaultValue": false }, - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Transpose", "category": "matrices", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "perm", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "perm", "type": "number[]" } ], "attrs": [ - {"tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true} + { "tfName": "T", "name": "dtype", "type": "dtype", "notSupported": true } ] }, { "tfOpName": "Einsum", "category": "matrices", - "inputs": [{"start": 0, "end": 0, "name": "tensors", "type": "tensors"}], + "inputs": [{ "start": 0, "end": 0, "name": "tensors", "type": "tensors" }], "attrs": [ - {"tfName": "equation", "name": "equation", "type": "string"}, - {"tfName": "N", "name": "n", "type": "number", "defaultValue": 2}, - {"tfName": "T", "name": "dtype", "type": "dtype"} + { "tfName": "equation", "name": "equation", "type": "string" }, + { "tfName": "N", "name": "n", "type": "number", "defaultValue": 2 }, + { "tfName": "T", "name": "dtype", "type": "dtype" } ] } ]; @@ -30773,11 +30773,11 @@ var json13 = [ "tfOpName": "FusedBatchNorm", "category": "normalization", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "scale", "type": "tensor"}, - {"start": 2, "name": "offset", "type": "tensor"}, - {"start": 3, "name": "mean", "type": "tensor"}, - {"start": 4, "name": "variance", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "scale", "type": "tensor" }, + { "start": 2, "name": "offset", "type": "tensor" }, + { "start": 3, "name": "mean", "type": "tensor" }, + { "start": 4, "name": "variance", "type": "tensor" } ], "attrs": [ { @@ -30798,11 +30798,11 @@ var json13 = [ "tfOpName": "FusedBatchNormV2", "category": "normalization", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "scale", "type": "tensor"}, - {"start": 2, "name": "offset", "type": "tensor"}, - {"start": 3, "name": "mean", "type": "tensor"}, - {"start": 4, "name": "variance", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "scale", "type": "tensor" }, + { "start": 2, "name": "offset", "type": "tensor" }, + { 
"start": 3, "name": "mean", "type": "tensor" }, + { "start": 4, "name": "variance", "type": "tensor" } ], "attrs": [ { @@ -30823,11 +30823,11 @@ var json13 = [ "tfOpName": "FusedBatchNormV3", "category": "normalization", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "scale", "type": "tensor"}, - {"start": 2, "name": "offset", "type": "tensor"}, - {"start": 3, "name": "mean", "type": "tensor"}, - {"start": 4, "name": "variance", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "scale", "type": "tensor" }, + { "start": 2, "name": "offset", "type": "tensor" }, + { "start": 3, "name": "mean", "type": "tensor" }, + { "start": 4, "name": "variance", "type": "tensor" } ], "attrs": [ { @@ -30848,7 +30848,7 @@ var json13 = [ "tfOpName": "LRN", "category": "normalization", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ { @@ -30857,7 +30857,7 @@ var json13 = [ "type": "number", "defaultValue": 5 }, - {"tfName": "bias", "name": "bias", "type": "number", "defaultValue": 1}, + { "tfName": "bias", "name": "bias", "type": "number", "defaultValue": 1 }, { "tfName": "alpha", "name": "alpha", @@ -30875,21 +30875,21 @@ var json13 = [ { "tfOpName": "Softmax", "category": "normalization", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "LogSoftmax", "category": "normalization", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "SparseToDense", "category": "normalization", "inputs": [ - {"start": 0, "name": "sparseIndices", "type": "tensor"}, - {"start": 1, "name": "outputShape", "type": "number[]"}, - {"start": 2, "name": "sparseValues", "type": "tensor"}, - {"start": 3, "name": "defaultValue", "type": "tensor"} + { "start": 0, "name": "sparseIndices", "type": "tensor" }, + { "start": 1, "name": "outputShape", "type": "number[]" }, + { "start": 2, "name": "sparseValues", "type": "tensor" }, + { "start": 3, "name": "defaultValue", "type": "tensor" } ], "attrs": [{ "tfName": "validate_indices", @@ -30909,110 +30909,110 @@ var json14 = [ "tfOpName": "Bincount", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "size", "type": "number"}, - {"start": 2, "name": "weights", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "size", "type": "number" }, + { "start": 2, "name": "weights", "type": "tensor" } ] }, { "tfOpName": "DenseBincount", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "size", "type": "number"}, - {"start": 2, "name": "weights", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "size", "type": "number" }, + { "start": 2, "name": "weights", "type": "tensor" } ], - "attrs": [{"tfName": "binary_output", "name": "binaryOutput", "type": "bool"}] + "attrs": [{ "tfName": "binary_output", "name": "binaryOutput", "type": "bool" }] }, { "tfOpName": "Max", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", 
"name": "keepDims", "type": "bool" }] }, { "tfOpName": "Mean", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "Min", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "Sum", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "All", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "Any", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "ArgMax", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number" } ] }, { "tfOpName": "ArgMin", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number" } ] }, { "tfOpName": "Prod", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ], - "attrs": [{"tfName": "keep_dims", "name": "keepDims", "type": "bool"}] + "attrs": [{ "tfName": "keep_dims", "name": "keepDims", "type": "bool" }] }, { "tfOpName": "Cumsum", "category": "reduction", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number" } ], "attrs": [ - {"tfName": "exclusive", "name": "exclusive", "type": "bool"}, - {"tfName": "reverse", "name": "reverse", "type": "bool"} + { "tfName": "exclusive", "name": "exclusive", "type": "bool" }, + { "tfName": "reverse", "name": "reverse", "type": 
"bool" } ] } ]; @@ -31025,27 +31025,27 @@ var json15 = [ "tfOpName": "ConcatV2", "category": "slice_join", "inputs": [ - {"start": 0, "end": -1, "name": "tensors", "type": "tensors"}, - {"start": -1, "name": "axis", "type": "number"} + { "start": 0, "end": -1, "name": "tensors", "type": "tensors" }, + { "start": -1, "name": "axis", "type": "number" } ], - "attrs": [{"tfName": "N", "name": "n", "type": "number", "defaultValue": 2}] + "attrs": [{ "tfName": "N", "name": "n", "type": "number", "defaultValue": 2 }] }, { "tfOpName": "Concat", "category": "slice_join", "inputs": [ - {"start": 1, "end": 0, "name": "tensors", "type": "tensors"}, - {"start": 0, "name": "axis", "type": "number"} + { "start": 1, "end": 0, "name": "tensors", "type": "tensors" }, + { "start": 0, "name": "axis", "type": "number" } ], - "attrs": [{"tfName": "N", "name": "n", "type": "number", "defaultValue": 2}] + "attrs": [{ "tfName": "N", "name": "n", "type": "number", "defaultValue": 2 }] }, { "tfOpName": "GatherV2", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "tensor"}, - {"start": 2, "name": "axis", "type": "number", "defaultValue": 0} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "tensor" }, + { "start": 2, "name": "axis", "type": "number", "defaultValue": 0 } ], "attrs": [{ "tfName": "batch_dims", @@ -31058,8 +31058,8 @@ var json15 = [ "tfOpName": "Gather", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "tensor" } ], "attrs": [{ "tfName": "validate_indices", @@ -31072,35 +31072,35 @@ var json15 = [ "tfOpName": "Reverse", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "dims", "type": "bool[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "dims", "type": "bool[]" } ] }, { "tfOpName": "ReverseV2", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number[]" } ] }, { "tfOpName": "Slice", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "begin", "type": "number[]"}, - {"start": 2, "name": "size", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "begin", "type": "number[]" }, + { "start": 2, "name": "size", "type": "number[]" } ] }, { "tfOpName": "StridedSlice", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "begin", "type": "number[]"}, - {"start": 2, "name": "end", "type": "number[]"}, - {"start": 3, "name": "strides", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "begin", "type": "number[]" }, + { "start": 2, "name": "end", "type": "number[]" }, + { "start": 3, "name": "strides", "type": "number[]" } ], "attrs": [ { @@ -31139,20 +31139,20 @@ var json15 = [ "tfOpName": "Pack", "category": "slice_join", "inputs": [ - {"start": 0, "end": 0, "name": "tensors", "type": "tensors"} + { "start": 0, "end": 0, "name": "tensors", "type": "tensors" } ], "attrs": [ - {"tfName": "axis", "name": "axis", "type": "number", "defaultValue": 0} + { "tfName": "axis", 
"name": "axis", "type": "number", "defaultValue": 0 } ] }, { "tfOpName": "Unpack", "category": "slice_join", "inputs": [ - {"start": 0, "name": "tensor", "type": "tensor"} + { "start": 0, "name": "tensor", "type": "tensor" } ], "attrs": [ - {"tfName": "axis", "name": "axis", "type": "number", "defaultValue": 0}, + { "tfName": "axis", "name": "axis", "type": "number", "defaultValue": 0 }, { "tfName": "num", "name": "num", @@ -31166,16 +31166,16 @@ var json15 = [ "tfOpName": "Tile", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "reps", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "reps", "type": "number[]" } ] }, { "tfOpName": "Split", "category": "slice_join", "inputs": [ - {"start": 0, "name": "axis", "type": "number", "defaultValue": 0}, - {"start": 1, "name": "x", "type": "tensor"} + { "start": 0, "name": "axis", "type": "number", "defaultValue": 0 }, + { "start": 1, "name": "x", "type": "tensor" } ], "attrs": [{ "tfName": "num_split", @@ -31188,36 +31188,36 @@ var json15 = [ "tfOpName": "SplitV", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "numOrSizeSplits", "type": "number[]"}, - {"start": 2, "name": "axis", "type": "number", "defaultValue": 0} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "numOrSizeSplits", "type": "number[]" }, + { "start": 2, "name": "axis", "type": "number", "defaultValue": 0 } ] }, { "tfOpName": "ScatterNd", "category": "slice_join", "inputs": [ - {"start": 0, "name": "indices", "type": "tensor"}, - {"start": 1, "name": "values", "type": "tensor"}, - {"start": 2, "name": "shape", "type": "number[]"} + { "start": 0, "name": "indices", "type": "tensor" }, + { "start": 1, "name": "values", "type": "tensor" }, + { "start": 2, "name": "shape", "type": "number[]" } ] }, { "tfOpName": "GatherNd", "category": "slice_join", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "indices", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "indices", "type": "tensor" } ] }, { "tfOpName": "SparseToDense", "category": "slice_join", "inputs": [ - {"start": 0, "name": "sparseIndices", "type": "tensor"}, - {"start": 1, "name": "outputShape", "type": "number[]"}, - {"start": 2, "name": "sparseValues", "type": "tensor"}, - {"start": 3, "name": "defaultValue", "type": "tensor"} + { "start": 0, "name": "sparseIndices", "type": "tensor" }, + { "start": 1, "name": "outputShape", "type": "number[]" }, + { "start": 2, "name": "sparseValues", "type": "tensor" }, + { "start": 3, "name": "defaultValue", "type": "tensor" } ], "attrs": [{ "tfName": "validate_indices", @@ -31236,18 +31236,18 @@ var json16 = [ { "tfOpName": "FFT", "category": "spectral", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "IFFT", "category": "spectral", - "inputs": [{"start": 0, "name": "x", "type": "tensor"}] + "inputs": [{ "start": 0, "name": "x", "type": "tensor" }] }, { "tfOpName": "RFFT", "category": "spectral", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, + { "start": 0, "name": "x", "type": "tensor" }, { "start": 1, "name": "fft_length", @@ -31260,7 +31260,7 @@ var json16 = [ "tfOpName": "IRFFT", "category": "spectral", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, + { "start": 0, "name": "x", "type": "tensor" }, { "start": 1, 
"name": "fft_length", @@ -31279,7 +31279,7 @@ var json17 = [ "tfOpName": "Cast", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ { @@ -31288,32 +31288,32 @@ var json17 = [ "type": "dtype", "notSupported": true }, - {"tfName": "DstT", "name": "dtype", "type": "dtype"} + { "tfName": "DstT", "name": "dtype", "type": "dtype" } ] }, { "tfOpName": "ExpandDims", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "axis", "type": "number"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "axis", "type": "number" } ] }, { "tfOpName": "MirrorPad", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "padding", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "padding", "type": "number[]" } ], - "attrs": [{"tfName": "mode", "name": "mode", "type": "string"}] + "attrs": [{ "tfName": "mode", "name": "mode", "type": "string" }] }, { "tfOpName": "Pad", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "padding", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "padding", "type": "number[]" } ], "attrs": [{ "tfName": "constant_value", @@ -31326,8 +31326,8 @@ var json17 = [ "tfOpName": "PadV2", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "padding", "type": "number[]"}, + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "padding", "type": "number[]" }, { "start": 2, "name": "constantValue", @@ -31340,15 +31340,15 @@ var json17 = [ "tfOpName": "Reshape", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "shape", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "shape", "type": "number[]" } ] }, { "tfOpName": "Squeeze", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [{ "tfName": "axis", @@ -31361,37 +31361,37 @@ var json17 = [ "tfOpName": "SpaceToBatchND", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "blockShape", "type": "number[]"}, - {"start": 2, "name": "paddings", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "blockShape", "type": "number[]" }, + { "start": 2, "name": "paddings", "type": "number[]" } ] }, { "tfOpName": "BatchToSpaceND", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "blockShape", "type": "number[]"}, - {"start": 2, "name": "crops", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "blockShape", "type": "number[]" }, + { "start": 2, "name": "crops", "type": "number[]" } ] }, { "tfOpName": "DepthToSpace", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"} + { "start": 0, "name": "x", "type": "tensor" } ], "attrs": [ - {"tfName": "block_size", "name": "blockSize", "type": "number"}, - {"tfName": "data_format", "name": "dataFormat", "type": "string"} + { "tfName": "block_size", "name": "blockSize", "type": "number" }, + { "tfName": "data_format", "name": 
"dataFormat", "type": "string" } ] }, { "tfOpName": "BroadcastTo", "category": "transformation", "inputs": [ - {"start": 0, "name": "x", "type": "tensor"}, - {"start": 1, "name": "shape", "type": "number[]"} + { "start": 0, "name": "x", "type": "tensor" }, + { "start": 1, "name": "shape", "type": "number[]" } ], "attrs": [] } @@ -31495,7 +31495,7 @@ var OperationMapper = class { return functions2; }, {}); } - const result = {nodes, inputs, outputs, weights, placeholders, signature, functions}; + const result = { nodes, inputs, outputs, weights, placeholders, signature, functions }; if (initNodes.length > 0) { result.initNodes = initNodes; } @@ -31610,7 +31610,7 @@ var OperationMapper = class { default: throw new Error(`Unsupported param type: ${param.type} for op: ${node2.op}`); } - map[param.name] = {value, type}; + map[param.name] = { value, type }; return map; }, {}); } @@ -31641,7 +31641,7 @@ var OperationMapper = class { inputNames: [], category: "graph", inputParams: {}, - attrParams: {dtype: {value: parseDtypeParam(arg.type), type: "dtype"}}, + attrParams: { dtype: { value: parseDtypeParam(arg.type), type: "dtype" } }, children: [] }; node2.signatureKey = arg.name; @@ -31667,7 +31667,7 @@ var OperationMapper = class { } }); const signature = this.mapArgsToSignature(functionDef); - return {nodes, inputs, outputs, weights, placeholders, signature}; + return { nodes, inputs, outputs, weights, placeholders, signature }; } mapArgsToSignature(functionDef) { return { @@ -31687,7 +31687,7 @@ var OperationMapper = class { if (nameMap2 != null) { name = nameMap2[name]; } - return {name, dtype: arg.type}; + return { name, dtype: arg.type }; } }; function decodeBase64(text) { @@ -32736,7 +32736,7 @@ var executeOp4 = (node2, tensorMap, context) => { return [conv2d(getParamValue("x", node2, tensorMap, context), getParamValue("filter", node2, tensorMap, context), [stride[1], stride[2]], pad3, dataFormat, [dilations[1], dilations[2]])]; } case "_FusedConv2D": { - const {stride, pad: pad3, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha} = fusedConvAndDepthWiseParams(node2, tensorMap, context); + const { stride, pad: pad3, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha } = fusedConvAndDepthWiseParams(node2, tensorMap, context); return [fused_ops_exports.conv2d({ x: getParamValue("x", node2, tensorMap, context), filter: getParamValue("filter", node2, tensorMap, context), @@ -32751,7 +32751,7 @@ var executeOp4 = (node2, tensorMap, context) => { })]; } case "FusedDepthwiseConv2dNative": { - const {stride, pad: pad3, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha} = fusedConvAndDepthWiseParams(node2, tensorMap, context); + const { stride, pad: pad3, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha } = fusedConvAndDepthWiseParams(node2, tensorMap, context); return [fused_ops_exports.depthwiseConv2d({ x: getParamValue("x", node2, tensorMap, context), filter: getParamValue("filter", node2, tensorMap, context), @@ -32804,7 +32804,7 @@ var executeOp4 = (node2, tensorMap, context) => { const pad3 = getParamValue("pad", node2, tensorMap, context); const kernelSize = getParamValue("kernelSize", node2, tensorMap, context); const includeBatchInIndex = getParamValue("includeBatchInIndex", node2, tensorMap, context); - const {result, indexes} = maxPoolWithArgmax(getParamValue("x", node2, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad3, includeBatchInIndex); + const { result, indexes 
} = maxPoolWithArgmax(getParamValue("x", node2, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad3, includeBatchInIndex); return [result, indexes]; } case "AvgPool3D": { @@ -32911,19 +32911,19 @@ function nmsParams(node2, tensorMap, context) { var executeOp6 = async (node2, tensorMap, context) => { switch (node2.op) { case "NonMaxSuppressionV5": { - const {boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = nmsParams(node2, tensorMap, context); + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = nmsParams(node2, tensorMap, context); const result = await image.nonMaxSuppressionWithScoreAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); return [result.selectedIndices, result.selectedScores]; } case "NonMaxSuppressionV4": { - const {boxes, scores, maxOutputSize, iouThreshold, scoreThreshold} = nmsParams(node2, tensorMap, context); + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold } = nmsParams(node2, tensorMap, context); const padToMaxOutputSize = getParamValue("padToMaxOutputSize", node2, tensorMap, context); const result = await image.nonMaxSuppressionPaddedAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); return [result.selectedIndices, result.validOutputs]; } case "NonMaxSuppressionV3": case "NonMaxSuppressionV2": { - const {boxes, scores, maxOutputSize, iouThreshold, scoreThreshold} = nmsParams(node2, tensorMap, context); + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold } = nmsParams(node2, tensorMap, context); return [await image.nonMaxSuppressionAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)]; } case "Where": { @@ -33421,7 +33421,7 @@ var executeOp15 = (node2, tensorMap, context) => { var executeOp16 = (node2, tensorMap, context) => { switch (node2.op) { case "SparseReshape": { - const {outputIndices, outputShape} = sparse.sparseReshape(getParamValue("inputIndices", node2, tensorMap, context), getParamValue("inputShape", node2, tensorMap, context), getParamValue("newShape", node2, tensorMap, context)); + const { outputIndices, outputShape } = sparse.sparseReshape(getParamValue("inputIndices", node2, tensorMap, context), getParamValue("inputShape", node2, tensorMap, context), getParamValue("newShape", node2, tensorMap, context)); return [outputIndices, outputShape]; } default: @@ -33552,13 +33552,13 @@ var ExecutionContext = class { this.tensorArrayMap = tensorArrayMap; this.tensorListMap = tensorListMap; this.functionMap = functionMap; - this.rootContext = {id: 0, frameName: "", iterationId: 0}; + this.rootContext = { id: 0, frameName: "", iterationId: 0 }; this.contexts = [this.rootContext]; this.lastId = 0; this.generateCurrentContextIds(); } newFrame(id, frameName) { - return {id, frameName, iterationId: 0}; + return { id, frameName, iterationId: 0 }; } set currentContext(contexts2) { if (this.contexts !== contexts2) { @@ -33683,10 +33683,10 @@ function getExecutionSubgraph(inputs, outputs, weightMap, initNodes) { frontier.push(input2); }); } - return {inputs, outputs, usedNodes, missingInputs, dynamicNode, syncInputs}; + return { inputs, outputs, usedNodes, missingInputs, dynamicNode, syncInputs }; } function getNodesInTopologicalOrder(graph2, weightMap, executionInfo) { - const {usedNodes, inputs} = executionInfo; + const { usedNodes, inputs } = executionInfo; const frontier = []; const inputNodes = Object.keys(inputs).map((name) => parseNodeName(name)[0]).map((name) => 
graph2.nodes[name]); const initNodes = graph2.initNodes; @@ -33836,7 +33836,7 @@ var GraphExecutor = class { } compile(inputs, outputs) { const executionInfo = getExecutionSubgraph(inputs, outputs, this.weightMap, this._initNodes); - const {missingInputs, dynamicNode, syncInputs} = executionInfo; + const { missingInputs, dynamicNode, syncInputs } = executionInfo; if (dynamicNode != null) { throw new Error(`This execution contains the node '${dynamicNode.name}', which has the dynamic op '${dynamicNode.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${syncInputs}]`); } @@ -33973,13 +33973,13 @@ var GraphExecutor = class { if (outputNodes.length === 0) { outputNodes = this._outputs; } - const {usedNodes, missingInputs, dynamicNode, syncInputs} = getExecutionSubgraph(inputs, outputNodes, this.weightMap, this._initNodes); + const { usedNodes, missingInputs, dynamicNode, syncInputs } = getExecutionSubgraph(inputs, outputNodes, this.weightMap, this._initNodes); const stack2 = [ ...inputNodes, ...this.graph.weights, ...this._initNodes || [] ].map((node2) => { - return {node: node2, contexts: context.currentContext}; + return { node: node2, contexts: context.currentContext }; }); const tensorsMap = Object.assign({}, this.weightMap); Object.keys(inputs).forEach((name) => { @@ -34053,13 +34053,13 @@ var GraphExecutor = class { return !!getTensor(name, tensorMap, context); })) { added[nodeName] = true; - stack2.push({contexts: context.currentContext, node: childNode}); + stack2.push({ contexts: context.currentContext, node: childNode }); } } else if (childNode.inputNames.every((name) => { return !!getTensor(name, tensorMap, context); })) { added[nodeName] = true; - stack2.push({contexts: context.currentContext, node: childNode}); + stack2.push({ contexts: context.currentContext, node: childNode }); } }); } @@ -34396,9 +34396,9 @@ function zipToList(x) { return null; } if (isIterable2(x[0])) { - return {value: null, recurse: true}; + return { value: null, recurse: true }; } else { - return {value: x, recurse: false}; + return { value: x, recurse: false }; } } async function deepMapAndAwaitAll(input2, mapFn) { @@ -34428,11 +34428,11 @@ function deepClone(container) { } function cloneIfTensor(item) { if (item instanceof Tensor) { - return {value: item.clone(), recurse: false}; + return { value: item.clone(), recurse: false }; } else if (isIterable2(item)) { - return {value: null, recurse: true}; + return { value: null, recurse: true }; } else { - return {value: item, recurse: false}; + return { value: item, recurse: false }; } } var RingBuffer = class { @@ -34674,11 +34674,11 @@ var ArrayIterator = class extends LazyIterator { } async next() { if (this.trav >= this.items.length) { - return {value: null, done: true}; + return { value: null, done: true }; } const item = this.items[this.trav]; this.trav++; - return {value: deepClone(item), done: false}; + return { value: deepClone(item), done: false }; } }; var FunctionCallIterator = class extends LazyIterator { @@ -34702,7 +34702,7 @@ var SerialIterator = class extends LazyIterator { constructor(upstream) { super(); this.upstream = upstream; - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } summary() { return `${this.upstream.summary()} -> Serial`; @@ -34721,7 +34721,7 @@ var SkipIterator = class extends LazyIterator { this.upstream = upstream; this.maxCount = maxCount; this.count = 0; - this.lastRead = 
Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } summary() { return `${this.upstream.summary()} -> Skip`; @@ -34753,7 +34753,7 @@ var TakeIterator = class extends LazyIterator { } async next() { if (this.count++ >= this.maxCount) { - return {value: null, done: true}; + return { value: null, done: true }; } return this.upstream.next(); } @@ -34764,7 +34764,7 @@ var RowMajorBatchIterator = class extends LazyIterator { this.upstream = upstream; this.batchSize = batchSize; this.enableSmallLastBatch = enableSmallLastBatch; - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } summary() { return `${this.upstream.summary()} -> RowMajorBatch`; @@ -34779,13 +34779,13 @@ var RowMajorBatchIterator = class extends LazyIterator { const item = await this.upstream.next(); if (item.done) { if (this.enableSmallLastBatch && batch.length > 0) { - return {value: batch, done: false}; + return { value: batch, done: false }; } - return {value: null, done: true}; + return { value: null, done: true }; } batch.push(item.value); } - return {value: batch, done: false}; + return { value: batch, done: false }; } }; var FilterIterator = class extends LazyIterator { @@ -34793,7 +34793,7 @@ var FilterIterator = class extends LazyIterator { super(); this.upstream = upstream; this.predicate = predicate; - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } summary() { return `${this.upstream.summary()} -> Filter`; @@ -34824,7 +34824,7 @@ var MapIterator = class extends LazyIterator { async next() { const item = await this.upstream.next(); if (item.done) { - return {value: null, done: true}; + return { value: null, done: true }; } const inputTensors = tensor_util_exports.getTensorsInContainer(item.value); const mapped = this.transform(item.value); @@ -34834,7 +34834,7 @@ var MapIterator = class extends LazyIterator { t.dispose(); } } - return {value: mapped, done: false}; + return { value: mapped, done: false }; } }; var ErrorHandlingLazyIterator = class extends LazyIterator { @@ -34843,7 +34843,7 @@ var ErrorHandlingLazyIterator = class extends LazyIterator { this.upstream = upstream; this.handler = handler; this.count = 0; - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } summary() { return `${this.upstream.summary()} -> handleErrors`; @@ -34858,7 +34858,7 @@ var ErrorHandlingLazyIterator = class extends LazyIterator { return await this.upstream.next(); } catch (e) { if (!this.handler(e)) { - return {value: null, done: true}; + return { value: null, done: true }; } } } @@ -34876,7 +34876,7 @@ var AsyncMapIterator = class extends LazyIterator { async next() { const item = await this.upstream.next(); if (item.done) { - return {value: null, done: true}; + return { value: null, done: true }; } const inputTensors = tensor_util_exports.getTensorsInContainer(item.value); const mapped = await this.transform(item.value); @@ -34886,14 +34886,14 @@ var AsyncMapIterator = class extends LazyIterator { t.dispose(); } } - return {value: mapped, done: false}; + return { value: mapped, done: false }; } }; var OneToManyIterator = class extends LazyIterator { constructor() { super(); this.outputQueue = new GrowingRingBuffer(); - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); 
} async next() { this.lastRead = this.lastRead.then(() => this.serialNext()); @@ -34902,10 +34902,10 @@ var OneToManyIterator = class extends LazyIterator { async serialNext() { while (this.outputQueue.length() === 0) { if (!await this.pump()) { - return {value: null, done: true}; + return { value: null, done: true }; } } - return {value: this.outputQueue.shift(), done: false}; + return { value: this.outputQueue.shift(), done: false }; } }; var FlatmapIterator = class extends OneToManyIterator { @@ -34955,7 +34955,7 @@ var ChainedIterator = class extends LazyIterator { if (this.iterator == null) { const iteratorResult = await this.moreIterators.next(); if (iteratorResult.done) { - return {value: null, done: true}; + return { value: null, done: true }; } this.iterator = iteratorResult.value; if (this.baseErrorHandler != null) { @@ -35006,25 +35006,25 @@ var ZipIterator = class extends LazyIterator { recurse: false }; } else { - return {value: null, recurse: true}; + return { value: null, recurse: true }; } } const mapped = await deepMapAndAwaitAll(this.iterators, getNext); if (numIterators === iteratorsDone) { - return {value: null, done: true}; + return { value: null, done: true }; } if (iteratorsDone > 0) { switch (this.mismatchMode) { case ZipMismatchMode.FAIL: throw new Error(`Zipped streams should have the same length. Mismatched at element ${this.count}.`); case ZipMismatchMode.SHORTEST: - return {value: null, done: true}; + return { value: null, done: true }; case ZipMismatchMode.LONGEST: default: } } this.count++; - return {value: mapped, done: false}; + return { value: mapped, done: false }; } async next() { this.currentPromise = this.nextState(this.currentPromise); @@ -35059,7 +35059,7 @@ var ShuffleIterator = class extends PrefetchIterator { this.windowSize = windowSize; this.upstreamExhausted = false; this.random = seedrandom2.alea(seed || util_exports.now().toString()); - this.lastRead = Promise.resolve({value: null, done: false}); + this.lastRead = Promise.resolve({ value: null, done: false }); } async next() { this.lastRead = this.lastRead.then(() => this.serialNext()); @@ -35085,7 +35085,7 @@ var ShuffleIterator = class extends PrefetchIterator { return result; } } - return {value: null, done: true}; + return { value: null, done: true }; } }; var Dataset = class { @@ -35167,7 +35167,7 @@ var Dataset = class { size = null; } return datasetFromIteratorFn(async () => { - const iteratorIterator = iteratorFromFunction(async () => ({value: await base2.iterator(), done: false})); + const iteratorIterator = iteratorFromFunction(async () => ({ value: await base2.iterator(), done: false })); return iteratorFromConcatenated(iteratorIterator.take(count2)); }, size); } @@ -35258,9 +35258,9 @@ function zip(datasets) { return datasetFromIteratorFn(async () => { const streams = await deepMapAndAwaitAll(datasets, (d) => { if (d instanceof Dataset) { - return {value: d.iterator(), recurse: false}; + return { value: d.iterator(), recurse: false }; } else if (isIterable2(d)) { - return {value: null, recurse: true}; + return { value: null, recurse: true }; } else { throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives."); } @@ -35275,9 +35275,9 @@ function deepBatchConcat(rows) { const exampleRow = rows[0]; if (canTensorify(exampleRow)) { const value = batchConcat(rows); - return {value, recurse: false}; + return { value, recurse: false }; } - return {value: null, recurse: true}; + return { value: null, recurse: true }; } function batchConcat(arrays) { if 
(arrays.length === 0) { @@ -35447,7 +35447,7 @@ var CSVDataset = class extends Dataset { if (Object.keys(labels).length === 0) { return features; } else { - return {xs: features, ys: labels}; + return { xs: features, ys: labels }; } } getBoolean(value) { @@ -35601,7 +35601,7 @@ var MicrophoneIterator = class extends LazyIterator { } async next() { if (this.isClosed) { - return {value: null, done: true}; + return { value: null, done: true }; } let spectrogramTensor; let waveformTensor; @@ -35615,7 +35615,7 @@ var MicrophoneIterator = class extends LazyIterator { waveformTensor = this.getTensorFromAudioDataArray(timeData, [this.numFrames * this.fftSize, 1]); } return { - value: {"spectrogram": spectrogramTensor, "waveform": waveformTensor}, + value: { "spectrogram": spectrogramTensor, "waveform": waveformTensor }, done: false }; } @@ -35631,7 +35631,7 @@ var MicrophoneIterator = class extends LazyIterator { if (this.includeSpectrogram) { this.analyser.getFloatFrequencyData(this.freqData); if (this.freqData[0] === -Infinity) { - resolve({freqDataQueue, timeDataQueue}); + resolve({ freqDataQueue, timeDataQueue }); } freqDataQueue.push(this.freqData.slice(0, this.columnTruncateLength)); } @@ -35641,7 +35641,7 @@ var MicrophoneIterator = class extends LazyIterator { } if (++currentFrames === this.numFrames) { clearInterval(intervalID); - resolve({freqDataQueue, timeDataQueue}); + resolve({ freqDataQueue, timeDataQueue }); } }, this.fftSize / this.sampleRateHz * 1e3); }); @@ -35753,7 +35753,7 @@ var WebcamIterator = class extends LazyIterator { } async next() { if (this.isClosed) { - return {value: null, done: true}; + return { value: null, done: true }; } let img; try { @@ -35763,14 +35763,14 @@ var WebcamIterator = class extends LazyIterator { } if (this.resize) { try { - return {value: this.cropAndResizeFrame(img), done: false}; + return { value: this.cropAndResizeFrame(img), done: false }; } catch (e) { throw new Error(`Error thrown cropping the video: ${e.message}`); } finally { img.dispose(); } } else { - return {value: img, done: false}; + return { value: img, done: false }; } } needToResize() { @@ -35880,7 +35880,7 @@ var Utf8IteratorImpl = class extends OneToManyIterator { if (env().get("IS_BROWSER")) { this.decoder = new TextDecoder("utf-8"); } else { - const {StringDecoder} = require_string_decoder(); + const { StringDecoder } = require_string_decoder(); this.decoder = new StringDecoder("utf8"); } } @@ -35897,7 +35897,7 @@ var Utf8IteratorImpl = class extends OneToManyIterator { } let text; if (env().get("IS_BROWSER")) { - text = this.decoder.decode(chunk, {stream: true}); + text = this.decoder.decode(chunk, { stream: true }); } else { text = this.decoder.write(Buffer.from(chunk.buffer)); } @@ -35919,7 +35919,7 @@ var FileChunkIterator = class extends ByteChunkIterator { } async next() { if (this.offset >= (this.file instanceof Uint8Array ? this.file.byteLength : this.file.size)) { - return {value: null, done: true}; + return { value: null, done: true }; } const chunk = new Promise((resolve, reject) => { const end = this.offset + this.chunkSize; @@ -35948,7 +35948,7 @@ var FileChunkIterator = class extends ByteChunkIterator { } this.offset = end; }); - return {value: await chunk, done: false}; + return { value: await chunk, done: false }; } }; async function urlChunkIterator(url, options = {}) { @@ -36061,8 +36061,8 @@ var MathBackendCPU = class extends KernelBackend { backend_util_exports.warn("\n============================\nHi there \u{1F44B}. 
Looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, which binds to TensorFlow C++, by running npm i @tensorflow/tfjs-node, or npm i @tensorflow/tfjs-node-gpu if you have CUDA. Then call require('@tensorflow/tfjs-node'); (-gpu suffix for CUDA) at the start of your program. Visit https://github.com/tensorflow/tfjs-node for more details.\n============================"); } } - const dataId = {id: this.nextDataId()}; - this.data.set(dataId, {values, dtype, refCount: 1}); + const dataId = { id: this.nextDataId() }; + this.data.set(dataId, { values, dtype, refCount: 1 }); return dataId; } makeTensorInfo(shape, dtype, values) { @@ -36073,7 +36073,7 @@ var MathBackendCPU = class extends KernelBackend { } else { outId = this.write(values, shape, dtype); } - return {dataId: outId, shape, dtype}; + return { dataId: outId, shape, dtype }; } refCount(dataId) { if (this.data.has(dataId)) { @@ -36093,7 +36093,7 @@ var MathBackendCPU = class extends KernelBackend { } } move(dataId, values, shape, dtype, refCount) { - this.data.set(dataId, {values, dtype, refCount}); + this.data.set(dataId, { values, dtype, refCount }); } numDataIds() { return this.data.numDataIds(); @@ -36102,7 +36102,7 @@ var MathBackendCPU = class extends KernelBackend { return this.readSync(dataId); } readSync(dataId) { - const {dtype, complexTensorInfos} = this.data.get(dataId); + const { dtype, complexTensorInfos } = this.data.get(dataId); if (dtype === "complex64") { const realValues = this.readSync(complexTensorInfos.real.dataId); const imagValues = this.readSync(complexTensorInfos.imag.dataId); @@ -36132,7 +36132,7 @@ var MathBackendCPU = class extends KernelBackend { if (!force && this.data.get(dataId).refCount > 0) { return false; } - const {complexTensorInfos} = this.data.get(dataId); + const { complexTensorInfos } = this.data.get(dataId); if (complexTensorInfos != null) { this.disposeData(complexTensorInfos.real.dataId, true); this.disposeData(complexTensorInfos.imag.dataId, true); @@ -36148,7 +36148,7 @@ var MathBackendCPU = class extends KernelBackend { const start = util_exports.now(); f(); const kernelMs = util_exports.now() - start; - return {kernelMs}; + return { kernelMs }; } memory() { return { @@ -36215,7 +36215,7 @@ function simpleAbsImpl(vals) { return resultValues; } var abs2 = (args) => { - const {x} = args.inputs; + const { x } = args.inputs; const cpuBackend = args.backend; assertNotComplex(x, "abs"); let resultValues = new Float32Array(util_exports.sizeFromShape(x.shape)); @@ -36261,8 +36261,8 @@ function createSimpleBinaryKernelImpl(op2) { }; } function complex2(args) { - const {inputs, backend: backend2} = args; - const {real: real4, imag: imag4} = inputs; + const { inputs, backend: backend2 } = args; + const { real: real4, imag: imag4 } = inputs; const realVals = backend2.data.get(real4.dataId).values; const imagVals = backend2.data.get(imag4.dataId).values; const complexInfo = backend2.makeTensorInfo(real4.shape, "complex64"); @@ -36282,16 +36282,16 @@ function zeros3(backend2, shape, dtype = "float32") { if (dtype === "complex64") { const real4 = zeros3(backend2, shape, "float32"); const imag4 = zeros3(backend2, shape, "float32"); - return complex2({inputs: {real: real4, imag: imag4}, backend: backend2}); + return complex2({ inputs: { real: real4, imag: imag4 }, backend: backend2 }); } const values = util_exports.makeZerosTypedArray(util_exports.sizeFromShape(shape), dtype); return backend2.makeTensorInfo(shape, dtype, values); } function 
identity2(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; backend2.incRef(x.dataId); - return {dataId: x.dataId, shape: x.shape, dtype: x.dtype}; + return { dataId: x.dataId, shape: x.shape, dtype: x.dtype }; } var identityConfig = { kernelName: Identity, @@ -36299,8 +36299,8 @@ var identityConfig = { kernelFunc: identity2 }; function real2(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const real4 = backend2.data.get(input2.dataId).complexTensorInfos.real; const realVal = backend2.data.get(real4.dataId).values; return backend2.makeTensorInfo(real4.shape, real4.dtype, realVal); @@ -36311,29 +36311,29 @@ var realConfig = { kernelFunc: real2 }; function cast3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {dtype} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { dtype } = attrs; if (dtype === "complex64") { if (x.dtype === "complex64") { - return identity2({inputs: {x}, backend: backend2}); + return identity2({ inputs: { x }, backend: backend2 }); } const zerosTensorInfo = zeros3(backend2, x.shape, x.dtype); - const floatX = cast3({inputs: {x}, backend: backend2, attrs: {dtype: "float32"}}); - const result = complex2({inputs: {real: floatX, imag: zerosTensorInfo}, backend: backend2}); + const floatX = cast3({ inputs: { x }, backend: backend2, attrs: { dtype: "float32" } }); + const result = complex2({ inputs: { real: floatX, imag: zerosTensorInfo }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(zerosTensorInfo); backend2.disposeIntermediateTensorInfo(floatX); return result; } if (x.dtype === "complex64") { - const realPart = real2({inputs: {input: x}, backend: backend2}); - const result = cast3({inputs: {x: realPart}, backend: backend2, attrs: {dtype}}); + const realPart = real2({ inputs: { input: x }, backend: backend2 }); + const result = cast3({ inputs: { x: realPart }, backend: backend2, attrs: { dtype } }); backend2.disposeIntermediateTensorInfo(realPart); return result; } if (!util_exports.hasEncodingLoss(x.dtype, dtype)) { - const result = identity2({inputs: {x}, backend: backend2}); - return {dataId: result.dataId, shape: result.shape, dtype}; + const result = identity2({ inputs: { x }, backend: backend2 }); + return { dataId: result.dataId, shape: result.shape, dtype }; } if (dtype === "int32") { const values = backend2.data.get(x.dataId).values; @@ -36355,8 +36355,8 @@ var castConfig = { }; function binaryKernelFunc(name, simpleImpl, complexImpl, dtype) { if (complexImpl == null) { - return ({inputs, backend: backend2}) => { - const {a, b} = inputs; + return ({ inputs, backend: backend2 }) => { + const { a, b } = inputs; const cpuBackend = backend2; assertNotComplex([a, b], name); const aVals = cpuBackend.data.get(a.dataId).values; @@ -36366,17 +36366,17 @@ function binaryKernelFunc(name, simpleImpl, complexImpl, dtype) { return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData); }; } - return ({inputs, backend: backend2}) => { - const {a, b} = inputs; + return ({ inputs, backend: backend2 }) => { + const { a, b } = inputs; const cpuBackend = backend2; if (a.dtype === "complex64" || b.dtype === "complex64") { - const $aComplex = cast3({inputs: {x: a}, backend: cpuBackend, attrs: {dtype: "complex64"}}); + const $aComplex = cast3({ inputs: { x: a }, backend: 
cpuBackend, attrs: { dtype: "complex64" } }); const $aComplexVals = cpuBackend.data.get($aComplex.dataId); const aReal = $aComplexVals.complexTensorInfos.real; const aImag = $aComplexVals.complexTensorInfos.imag; const aRealVals = cpuBackend.data.get(aReal.dataId).values; const aImagVals = cpuBackend.data.get(aImag.dataId).values; - const $bComplex = cast3({inputs: {x: b}, backend: cpuBackend, attrs: {dtype: "complex64"}}); + const $bComplex = cast3({ inputs: { x: b }, backend: cpuBackend, attrs: { dtype: "complex64" } }); const $bComplexVals = cpuBackend.data.get($bComplex.dataId); const bReal = $bComplexVals.complexTensorInfos.real; const bImag = $bComplexVals.complexTensorInfos.imag; @@ -36385,7 +36385,7 @@ function binaryKernelFunc(name, simpleImpl, complexImpl, dtype) { const [resultRealData, resultImagData, resultShape] = complexImpl(a.shape, b.shape, aRealVals, aImagVals, bRealVals, bImagVals); const resultReal = cpuBackend.makeTensorInfo(resultShape, "float32", resultRealData); const resultImag = cpuBackend.makeTensorInfo(resultShape, "float32", resultImagData); - const result = complex2({inputs: {real: resultReal, imag: resultImag}, backend: cpuBackend}); + const result = complex2({ inputs: { real: resultReal, imag: resultImag }, backend: cpuBackend }); cpuBackend.disposeIntermediateTensorInfo($aComplex); cpuBackend.disposeIntermediateTensorInfo($bComplex); cpuBackend.disposeIntermediateTensorInfo(resultReal); @@ -36443,7 +36443,7 @@ function createComplexBinaryKernelImpl(op2) { } var addImpl = createSimpleBinaryKernelImpl((a, b) => a + b); var addComplexImpl = createComplexBinaryKernelImpl((aReal, aImag, bReal, bImag) => { - return {real: aReal + bReal, imag: aImag + bImag}; + return { real: aReal + bReal, imag: aImag + bImag }; }); var add4 = binaryKernelFunc(Add, addImpl, addComplexImpl); var addConfig = { @@ -36506,8 +36506,8 @@ function createSimpleUnaryImpl(op2) { }; } function unaryKernelFunc(name, op2, dtype) { - return ({inputs, attrs, backend: backend2}) => { - const {x} = inputs; + return ({ inputs, attrs, backend: backend2 }) => { + const { x } = inputs; assertNotComplex(x, name); if (x.dtype === "string" || dtype === "string") { throw new Error("unaryKernelFunc does not support string input/output"); @@ -36524,8 +36524,8 @@ function unaryKernelFunc(name, op2, dtype) { }; } function unaryKernelFuncFromImpl(name, unaryImpl, dtype) { - return ({inputs, attrs, backend: backend2}) => { - const {x} = inputs; + return ({ inputs, attrs, backend: backend2 }) => { + const { x } = inputs; assertNotComplex(x, name); if (x.dtype === "string" || dtype === "string") { throw new Error("unaryKernelFunc does not support string input/output"); @@ -36681,8 +36681,8 @@ function negImpl(xVals, xShape, xDtype) { return multiplyImpl([], xShape, minusOne, xVals, xDtype); } function neg2(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; assertNotComplex(x, "neg"); const xVals = backend2.data.get(x.dataId).values; const [res, newShape] = negImpl(xVals, x.shape, x.dtype); @@ -36718,9 +36718,9 @@ function transposeImpl(xVals, xShape, dtype, perm, newShape) { return result; } function transpose2(args) { - const {inputs, attrs, backend: backend2} = args; - const {x} = inputs; - const {perm} = attrs; + const { inputs, attrs, backend: backend2 } = args; + const { x } = inputs; + const { perm } = attrs; assertNotComplex(x, "transpose"); const xRank = x.shape.length; const newShape = new Array(xRank); @@ 
-36730,7 +36730,7 @@ function transpose2(args) { const values = backend2.data.get(x.dataId).values; const result = transposeImpl(values, x.shape, x.dtype, perm, newShape); const dataId = backend2.write(result, newShape, x.dtype); - return {dataId, shape: newShape, dtype: x.dtype}; + return { dataId, shape: newShape, dtype: x.dtype }; } var transposeConfig = { kernelName: Transpose, @@ -36750,12 +36750,12 @@ function prodImpl(xShape, xDtype, xVals, reductionAxes) { } outVals[i] = prod5; } - return {outVals, outShape, outDtype}; + return { outVals, outShape, outDtype }; } function prod2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; assertNotComplex(x, "prod"); const xRank = x.shape.length; const axes = util_exports.parseAxisParam(axis, x.shape); @@ -36764,12 +36764,12 @@ function prod2(args) { let permutedX = x; const intermediateTensorInfos = []; if (permutation != null) { - permutedX = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutation}}); + permutedX = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutation } }); intermediateTensorInfos.push(permutedX); reductionAxes = backend_util_exports.getInnerMostAxes(reductionAxes.length, xRank); } const xVals = backend2.data.get(permutedX.dataId).values; - const {outVals, outShape, outDtype} = prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes); + const { outVals, outShape, outDtype } = prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes); let resultShape = outShape; if (keepDims) { resultShape = backend_util_exports.expandShapeToKeepDim(outShape, axes); @@ -36832,9 +36832,9 @@ function sliceImpl(vals, begin, size, shape, dtype) { return outBuf.values; } function slice2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {begin, size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { begin, size } = attrs; assertNotComplex(x, "slice"); const [$begin, $size] = slice_util_exports.parseSliceParams(x, begin, size); slice_util_exports.assertParamsValid(x, $begin, $size); @@ -37032,7 +37032,7 @@ function stridedSliceImpl(outShape, xBuf, strides, begin) { } var subImpl = createSimpleBinaryKernelImpl((aValue, bValue) => aValue - bValue); var subComplexImpl = createComplexBinaryKernelImpl((aReal, aImag, bReal, bImag) => { - return {real: aReal - bReal, imag: aImag - bImag}; + return { real: aReal - bReal, imag: aImag - bImag }; }); var sub2 = binaryKernelFunc(Sub, subImpl, subComplexImpl); var subConfig = { @@ -37067,7 +37067,7 @@ function topKImpl(x, xShape, xDtype, k, sorted) { const vals = x.subarray(offset, offset + size); const valAndInd = []; for (let i = 0; i < vals.length; i++) { - valAndInd.push({value: vals[i], index: i}); + valAndInd.push({ value: vals[i], index: i }); } valAndInd.sort((a, b2) => b2.value - a.value); const outOffset = b * k; @@ -37149,9 +37149,9 @@ var eluConfig = { kernelFunc: elu4 }; function leakyRelu2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {alpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { alpha } = attrs; assertNotComplex([x], "leakyRelu"); const xSize = util_exports.sizeFromShape(x.shape); const xVals = backend2.data.get(x.dataId).values; @@ -37168,8 +37168,8 @@ var leakyReluConfig = { }; var preluImpl 
= createSimpleBinaryKernelImpl((xValue, aValue) => xValue < 0 ? aValue * xValue : xValue); function prelu3(args) { - const {inputs, backend: backend2} = args; - const {x, alpha} = inputs; + const { inputs, backend: backend2 } = args; + const { x, alpha } = inputs; assertNotComplex([x, alpha], "prelu"); const aVals = backend2.data.get(x.dataId).values; const bVals = backend2.data.get(alpha.dataId).values; @@ -37201,26 +37201,26 @@ var sigmoidConfig = { }; function applyActivation2(backend2, x, activation2, preluActivationWeights, leakyreluAlpha) { if (activation2 === "linear") { - return identity2({inputs: {x}, backend: backend2}); + return identity2({ inputs: { x }, backend: backend2 }); } else if (activation2 === "relu") { - return relu2({inputs: {x}, backend: backend2}); + return relu2({ inputs: { x }, backend: backend2 }); } else if (activation2 === "elu") { - return elu4({inputs: {x}, backend: backend2}); + return elu4({ inputs: { x }, backend: backend2 }); } else if (activation2 === "relu6") { - return relu62({inputs: {x}, backend: backend2}); + return relu62({ inputs: { x }, backend: backend2 }); } else if (activation2 === "prelu") { - return prelu3({inputs: {x, alpha: preluActivationWeights}, backend: backend2}); + return prelu3({ inputs: { x, alpha: preluActivationWeights }, backend: backend2 }); } else if (activation2 === "leakyrelu") { - return leakyRelu2({inputs: {x}, backend: backend2, attrs: {alpha: leakyreluAlpha}}); + return leakyRelu2({ inputs: { x }, backend: backend2, attrs: { alpha: leakyreluAlpha } }); } else if (activation2 === "sigmoid") { - return sigmoid2({inputs: {x}, backend: backend2}); + return sigmoid2({ inputs: { x }, backend: backend2 }); } throw new Error(`Activation ${activation2} has not been implemented for the CPU backend.`); } function reshape3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {shape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { shape } = attrs; const xSize = util_exports.sizeFromShape(x.shape); const $shape = util_exports.inferFromImplicitShape(shape, xSize); const $xSize = util_exports.sizeFromShape($shape); @@ -37233,7 +37233,7 @@ function reshape3(args) { real4.shape = $shape; imag4.shape = $shape; } - return {dataId: x.dataId, shape: $shape, dtype: x.dtype}; + return { dataId: x.dataId, shape: $shape, dtype: x.dtype }; } var reshapeConfig = { kernelName: Reshape, @@ -37241,9 +37241,9 @@ var reshapeConfig = { kernelFunc: reshape3 }; function batchMatMul(args) { - const {inputs, backend: backend2, attrs} = args; - const {a, b} = inputs; - const {transposeA, transposeB} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { a, b } = inputs; + const { transposeA, transposeB } = attrs; assertNotComplex([a, b], "matMul"); const aRank = a.shape.length; const bRank = b.shape.length; @@ -37262,8 +37262,8 @@ function batchMatMul(args) { util_exports.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (${innerShapeB}) of Tensors with shapes ${a.shape} and ${b.shape} and transposeA=${transposeA} and transposeB=${transposeB} must match.`); const a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] : [batchDimA, outerShapeA, innerShapeA]; const b3dShape = transposeB ? 
[batchDimB, outerShapeB, innerShapeB] : [batchDimB, innerShapeB, outerShapeB]; - const a3d = reshape3({inputs: {x: a}, backend: backend2, attrs: {shape: a3dShape}}); - const b3d = reshape3({inputs: {x: b}, backend: backend2, attrs: {shape: b3dShape}}); + const a3d = reshape3({ inputs: { x: a }, backend: backend2, attrs: { shape: a3dShape } }); + const b3d = reshape3({ inputs: { x: b }, backend: backend2, attrs: { shape: b3dShape } }); const sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2]; const leftDim = transposeA ? a3d.shape[2] : a3d.shape[1]; const rightDim = transposeB ? b3d.shape[1] : b3d.shape[2]; @@ -37312,17 +37312,17 @@ var batchMatMulConfig = { kernelFunc: batchMatMul }; function _fusedMatMul(args) { - const {inputs, backend: backend2, attrs} = args; - const {a, b, bias, preluActivationWeights} = inputs; - const {transposeA, transposeB, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { a, b, bias, preluActivationWeights } = inputs; + const { transposeA, transposeB, activation: activation2, leakyreluAlpha } = attrs; let current; let addRes; let activationRes; const intermediates = []; - const matMulRes = batchMatMul({inputs: {a, b}, attrs: {transposeA, transposeB}, backend: backend2}); + const matMulRes = batchMatMul({ inputs: { a, b }, attrs: { transposeA, transposeB }, backend: backend2 }); current = matMulRes; if (bias) { - addRes = add4({inputs: {a: current, b: bias}, backend: backend2}); + addRes = add4({ inputs: { a: current, b: bias }, backend: backend2 }); intermediates.push(current); current = addRes; } @@ -37354,7 +37354,7 @@ var acoshConfig = { kernelFunc: acosh2 }; function addN2(args) { - const {inputs, backend: backend2} = args; + const { inputs, backend: backend2 } = args; const tensors = inputs; assertNotComplex(inputs, "addN"); const vals = tensors.map((t) => backend2.data.get(t.dataId).values); @@ -37374,16 +37374,16 @@ var addNConfig = { kernelFunc: addN2 }; function all2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; assertNotComplex(x, "all"); const origAxes = util_exports.parseAxisParam(axis, x.shape); let axes = origAxes; const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length); let $x = x; if (permutedAxes != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); axes = backend_util_exports.getInnerMostAxes(axes.length, x.shape.length); } backend_util_exports.assertAxesAreInnerMostDims("all", axes, $x.shape.length); @@ -37406,7 +37406,7 @@ function all2(args) { const result = backend2.makeTensorInfo(outShape, $x.dtype, vals); if (keepDims) { const expandedShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes); - const reshapedResult = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: expandedShape}}); + const reshapedResult = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: expandedShape } }); backend2.disposeIntermediateTensorInfo(result); return reshapedResult; } @@ -37418,16 +37418,16 @@ var allConfig = { kernelFunc: all2 }; function any2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x 
} = inputs; + const { axis, keepDims } = attrs; assertNotComplex(x, "any"); const origAxes = util_exports.parseAxisParam(axis, x.shape); let axes = origAxes; const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length); let $x = x; if (permutedAxes != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); axes = backend_util_exports.getInnerMostAxes(axes.length, x.shape.length); } backend_util_exports.assertAxesAreInnerMostDims("any", axes, $x.shape.length); @@ -37450,7 +37450,7 @@ function any2(args) { const result = backend2.makeTensorInfo(outShape, $x.dtype, vals); if (keepDims) { const expandedShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes); - const reshapedResult = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: expandedShape}}); + const reshapedResult = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: expandedShape } }); backend2.disposeIntermediateTensorInfo(result); return reshapedResult; } @@ -37462,16 +37462,16 @@ var anyConfig = { kernelFunc: any2 }; function argMax2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis } = attrs; assertNotComplex(x, "argMax"); let axes = util_exports.parseAxisParam(axis, x.shape); const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length); let $x = x; const intermediateTensorInfos = []; if (permutedAxes != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); intermediateTensorInfos.push($x); axes = backend_util_exports.getInnerMostAxes(axes.length, $x.shape.length); } @@ -37504,16 +37504,16 @@ var argMaxConfig = { kernelFunc: argMax2 }; function argMin2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis } = attrs; assertNotComplex(x, "argMin"); let axes = util_exports.parseAxisParam(axis, x.shape); const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length); let $x = x; const intermediateTensorInfos = []; if (permutedAxes != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); intermediateTensorInfos.push($x); axes = backend_util_exports.getInnerMostAxes(axes.length, $x.shape.length); } @@ -37829,16 +37829,16 @@ function maxPool3dPositions(xBuf, convInfo) { return maxPositions; } function avgPool2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; assertNotComplex(x, "avgPool"); - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const dilations = 1; util_exports.assert(backend_util_exports.eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in avgPool: Either strides or dilations must be 1. 
Got strides ${strides} and dilations '${dilations}'`); const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); let res; if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && util_exports.arraysEqual(convInfo.inShape, convInfo.outShape)) { - res = identity2({inputs: {x}, backend: backend2}); + res = identity2({ inputs: { x }, backend: backend2 }); } else { const xValues = backend2.data.get(x.dataId).values; const strides2 = util_exports.computeStrides(x.shape); @@ -37853,9 +37853,9 @@ var avgPoolConfig = { kernelFunc: avgPool2 }; function avgPool3D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {filterSize, strides, pad: pad3, dimRoundingMode, dataFormat} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad: pad3, dimRoundingMode, dataFormat } = attrs; assertNotComplex(x, "avgPool3d"); const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode, dataFormat); const xValues = backend2.data.get(x.dataId).values; @@ -37868,9 +37868,9 @@ var avgPool3DConfig = { kernelFunc: avgPool3D }; function avgPool3DGrad(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; assertNotComplex([dy, input2], "avgPool3DGrad"); const convInfo = backend_util_exports.computePool3DInfo(input2.shape, filterSize, strides, 1, pad3, dimRoundingMode); const strideDepth = convInfo.strideDepth; @@ -37934,11 +37934,11 @@ var avgPool3DGradConfig2 = { kernelFunc: avgPool3DGrad }; function avgPoolGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; const x = input2; assertNotComplex([dy, input2], "avgPoolGrad"); - const {filterSize, strides, pad: pad3} = attrs; + const { filterSize, strides, pad: pad3 } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3); const strideHeight = convInfo.strideHeight; const strideWidth = convInfo.strideWidth; @@ -37988,13 +37988,13 @@ var avgPoolGradConfig2 = { kernelFunc: avgPoolGrad2 }; function batchNorm2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, scale: scale22, offset, mean: mean4, variance} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { x, scale: scale22, offset, mean: mean4, variance } = inputs; util_exports.assert(mean4.shape.length === variance.shape.length, () => "Batch normalization gradient requires mean and variance to have equal ranks."); util_exports.assert(offset == null || mean4.shape.length === offset.shape.length, () => "Batch normalization gradient requires mean and offset to have equal ranks."); util_exports.assert(scale22 == null || mean4.shape.length === scale22.shape.length, () => "Batch normalization gradient requires mean and scale to have equal ranks."); assertNotComplex([x, mean4, variance, scale22, offset], "batchNorm"); - let {varianceEpsilon} = attrs; + let { varianceEpsilon } = attrs; if (varianceEpsilon == null) { varianceEpsilon = 1e-3; } @@ -38035,9 +38035,9 @@ var batchNormConfig = { kernelFunc: batchNorm2 }; function 
batchToSpaceND2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {blockShape, crops} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { blockShape, crops } = attrs; assertNotComplex([x], "batchToSpaceND"); const prod5 = blockShape.reduce((a, b) => a * b); const reshaped = backend_util_exports.getReshaped(x.shape, blockShape, prod5); @@ -38045,13 +38045,13 @@ function batchToSpaceND2(args) { const reshapedPermuted = backend_util_exports.getReshapedPermuted(x.shape, blockShape, prod5); const sliceBeginCoords = backend_util_exports.getSliceBeginCoords(crops, blockShape.length); const sliceSize = backend_util_exports.getSliceSize(reshapedPermuted, crops, blockShape.length); - const xReshaped = reshape3({inputs: {x}, backend: backend2, attrs: {shape: reshaped}}); - const xTransposed = transpose2({inputs: {x: xReshaped}, backend: backend2, attrs: {perm: permuted}}); - const xTransposedReshaped = reshape3({inputs: {x: xTransposed}, backend: backend2, attrs: {shape: reshapedPermuted}}); + const xReshaped = reshape3({ inputs: { x }, backend: backend2, attrs: { shape: reshaped } }); + const xTransposed = transpose2({ inputs: { x: xReshaped }, backend: backend2, attrs: { perm: permuted } }); + const xTransposedReshaped = reshape3({ inputs: { x: xTransposed }, backend: backend2, attrs: { shape: reshapedPermuted } }); const result = slice2({ - inputs: {x: xTransposedReshaped}, + inputs: { x: xTransposedReshaped }, backend: backend2, - attrs: {begin: sliceBeginCoords, size: sliceSize} + attrs: { begin: sliceBeginCoords, size: sliceSize } }); backend2.disposeIntermediateTensorInfo(xReshaped); backend2.disposeIntermediateTensorInfo(xTransposed); @@ -38064,9 +38064,9 @@ var batchToSpaceNDConfig = { kernelFunc: batchToSpaceND2 }; function bincount2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, weights} = inputs; - const {size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, weights } = inputs; + const { size } = attrs; const xVals = backend2.data.get(x.dataId).values; const weightsVals = backend2.data.get(weights.dataId).values; const outVals = bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size); @@ -38090,7 +38090,7 @@ var clipConfig = { kernelFunc: clip }; var complexAbs = (args) => { - const {x} = args.inputs; + const { x } = args.inputs; const cpuBackend = args.backend; const resultValues = new Float32Array(util_exports.sizeFromShape(x.shape)); const complexVals = cpuBackend.data.get(x.dataId); @@ -38111,8 +38111,8 @@ var complexAbsConfig = { kernelFunc: complexAbs }; function imag2(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const imag4 = backend2.data.get(input2.dataId).complexTensorInfos.imag; const imagVal = backend2.data.get(imag4.dataId).values; return backend2.makeTensorInfo(imag4.shape, imag4.dtype, imagVal); @@ -38123,8 +38123,8 @@ var imagConfig = { kernelFunc: imag2 }; function concat2(args) { - const {inputs, backend: backend2, attrs} = args; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { axis } = attrs; const $axis = util_exports.parseAxisParam(axis, inputs[0].shape)[0]; let outShape = backend_util_exports.computeOutShape(inputs.map((t) => t.shape), $axis); if (util_exports.sizeFromShape(outShape) === 0) { @@ -38132,16 +38132,16 @@ function concat2(args) { } const $inputs = 
inputs.filter((t) => util_exports.sizeFromShape(t.shape) > 0); if ($inputs.length === 1) { - return identity2({inputs: {x: $inputs[0]}, backend: backend2}); + return identity2({ inputs: { x: $inputs[0] }, backend: backend2 }); } const shapes = $inputs.map((t) => t.shape); backend_util_exports.assertParamsConsistent(shapes, $axis); if ($inputs[0].dtype === "complex64") { - const reals = $inputs.map((t) => real2({inputs: {input: t}, backend: backend2})); - const imags = $inputs.map((t) => imag2({inputs: {input: t}, backend: backend2})); - const realConcated = concat2({inputs: reals, backend: backend2, attrs: {axis: $axis}}); - const imagConcated = concat2({inputs: imags, backend: backend2, attrs: {axis: $axis}}); - const result = complex2({inputs: {real: realConcated, imag: imagConcated}, backend: backend2}); + const reals = $inputs.map((t) => real2({ inputs: { input: t }, backend: backend2 })); + const imags = $inputs.map((t) => imag2({ inputs: { input: t }, backend: backend2 })); + const realConcated = concat2({ inputs: reals, backend: backend2, attrs: { axis: $axis } }); + const imagConcated = concat2({ inputs: imags, backend: backend2, attrs: { axis: $axis } }); + const result = complex2({ inputs: { real: realConcated, imag: imagConcated }, backend: backend2 }); reals.forEach((r) => backend2.disposeIntermediateTensorInfo(r)); imags.forEach((i) => backend2.disposeIntermediateTensorInfo(i)); backend2.disposeIntermediateTensorInfo(realConcated); @@ -38151,10 +38151,10 @@ function concat2(args) { const inputs2D = $inputs.map((t) => { const innerSize = util_exports.sizeFromShape(t.shape.slice($axis)); const shape = [-1, innerSize]; - return reshape3({inputs: {x: t}, backend: backend2, attrs: {shape}}); + return reshape3({ inputs: { x: t }, backend: backend2, attrs: { shape } }); }); const inputsValShapes = inputs2D.map((t) => { - return {vals: backend2.data.get(t.dataId).values, shape: t.shape}; + return { vals: backend2.data.get(t.dataId).values, shape: t.shape }; }); outShape = backend_util_exports.computeOutShape(inputs2D.map((t) => t.shape), 1); const simplyConcat = inputs2D[0].shape[0] === 1; @@ -38170,9 +38170,9 @@ var concatConfig = { kernelFunc: concat2 }; function conv2D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dataFormat, dilations, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dataFormat, dilations, dimRoundingMode } = attrs; assertNotComplex([x, filter], "conv2d"); const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode, false, $dataFormat); @@ -38241,13 +38241,13 @@ var conv2DConfig = { kernelFunc: conv2D }; function conv2DBackpropFilter2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, pad: pad3, dataFormat, dimRoundingMode, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, pad: pad3, dataFormat, dimRoundingMode, filterShape } = attrs; assertNotComplex([x, dy], "conv2dBackpropFilter"); const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filterShape, strides, 1, pad3, dimRoundingMode, false, $dataFormat); - const {strideHeight, strideWidth, filterHeight, 
filterWidth} = convInfo; + const { strideHeight, strideWidth, filterHeight, filterWidth } = convInfo; const isChannelsLast = convInfo.dataFormat === "channelsLast"; const dW = new TensorBuffer(convInfo.filterShape, "float32"); const leftPad = convInfo.padInfo.left; @@ -38291,9 +38291,9 @@ var conv2DBackpropFilterConfig = { kernelFunc: conv2DBackpropFilter2 }; function conv2DBackpropInput2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {inputShape, strides, pad: pad3, dataFormat, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { inputShape, strides, pad: pad3, dataFormat, dimRoundingMode } = attrs; assertNotComplex([dy, filter], "conv2dBackpropInput"); const filterStrides = util_exports.computeStrides(filter.shape); const dyStrides = util_exports.computeStrides(dy.shape); @@ -38304,7 +38304,7 @@ function conv2DBackpropInput2(args) { const dyValues = backend2.data.get(dy.dataId).values; const fltValues = backend2.data.get(filter.dataId).values; const [fltS0, fltS1, fltS2] = filterStrides; - const {batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth} = convInfo; + const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo; $dataFormat = convInfo.dataFormat; const topPad = filterHeight - 1 - convInfo.padInfo.top; const leftPad = filterWidth - 1 - convInfo.padInfo.left; @@ -38355,12 +38355,12 @@ var conv2DBackpropInputConfig = { kernelFunc: conv2DBackpropInput2 }; function conv3D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dilations} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dilations } = attrs; assertNotComplex([x, filter], "conv3d"); const convInfo = backend_util_exports.computeConv3DInfo(x.shape, filter.shape, strides, dilations, pad3); - const {filterDepth, filterHeight, filterWidth, dilationDepth, dilationHeight, dilationWidth, padInfo} = convInfo; + const { filterDepth, filterHeight, filterWidth, dilationDepth, dilationHeight, dilationWidth, padInfo } = convInfo; const padFront = padInfo.front; const padLeft = padInfo.left; const padTop = padInfo.top; @@ -38426,9 +38426,9 @@ var conv3DConfig = { kernelFunc: conv3D }; function conv3DBackpropFilterV2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, pad: pad3, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, pad: pad3, filterShape } = attrs; assertNotComplex([x, dy], "conv3dBackpropFilterV2"); const xStrides = util_exports.computeStrides(x.shape); const dyStrides = util_exports.computeStrides(dy.shape); @@ -38499,9 +38499,9 @@ var conv3DBackpropFilterV2Config = { kernelFunc: conv3DBackpropFilterV2 }; function conv3DBackpropInputV2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {pad: pad3, strides, inputShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { pad: pad3, strides, inputShape } = attrs; assertNotComplex([dy], "conv3dBackpropInputV2"); const dyStrides = util_exports.computeStrides(dy.shape); const filterStrides = util_exports.computeStrides(filter.shape); @@ 
-38513,7 +38513,7 @@ function conv3DBackpropInputV2(args) { const [dyS0, dyS1, dyS2, dyS3] = dyStrides; const fltValues = backend2.data.get(filter.dataId).values; const [fltS0, fltS1, fltS2, fltS3] = filterStrides; - const {batchSize, filterDepth, filterHeight, filterWidth, inChannels, inDepth, inHeight, inWidth, outChannels, outDepth, outHeight, outWidth, strideDepth, strideHeight, strideWidth} = convInfo; + const { batchSize, filterDepth, filterHeight, filterWidth, inChannels, inDepth, inHeight, inWidth, outChannels, outDepth, outHeight, outWidth, strideDepth, strideHeight, strideWidth } = convInfo; const frontPad = filterDepth - 1 - convInfo.padInfo.front; const topPad = filterHeight - 1 - convInfo.padInfo.top; const leftPad = filterWidth - 1 - convInfo.padInfo.left; @@ -38574,9 +38574,9 @@ var coshConfig = { kernelFunc: cosh2 }; function cropAndResize2(args) { - const {inputs, backend: backend2, attrs} = args; - const {image: image3, boxes, boxInd} = inputs; - const {cropSize, method, extrapolationValue} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { image: image3, boxes, boxInd } = inputs; + const { cropSize, method, extrapolationValue } = attrs; const [batch, imageHeight, imageWidth, numChannels] = image3.shape; const numBoxes = boxes.shape[0]; const [cropHeight, cropWidth] = cropSize; @@ -38669,14 +38669,14 @@ var cropAndResizeConfig = { kernelFunc: cropAndResize2 }; function cumsum2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, exclusive, reverse: reverse5} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse: reverse5 } = attrs; assertNotComplex(x, "cumsum"); const permutation = backend_util_exports.getAxesPermutation([axis], x.shape.length); let $x = x; if (permutation != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutation}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutation } }); } const permutedAxis = backend_util_exports.getInnerMostAxes(1, x.shape.length)[0]; if (permutedAxis !== $x.shape.length - 1) { @@ -38701,7 +38701,7 @@ function cumsum2(args) { const result = backend2.makeTensorInfo($x.shape, resultDtype, vals); if (permutation != null) { const reversePermutation = backend_util_exports.getUndoAxesPermutation(permutation); - const reverseTransposedResult = transpose2({inputs: {x: result}, backend: backend2, attrs: {perm: reversePermutation}}); + const reverseTransposedResult = transpose2({ inputs: { x: result }, backend: backend2, attrs: { perm: reversePermutation } }); backend2.disposeIntermediateTensorInfo(result); backend2.disposeIntermediateTensorInfo($x); return reverseTransposedResult; @@ -38714,9 +38714,9 @@ var cumsumConfig = { kernelFunc: cumsum2 }; function denseBincount2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, weights} = inputs; - const {size, binaryOutput} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, weights } = inputs; + const { size, binaryOutput } = attrs; if (x.shape.length === 1) { const xVals = backend2.data.get(x.dataId).values; const weightsVals = backend2.data.get(weights.dataId).values; @@ -38736,9 +38736,9 @@ var denseBincountConfig = { kernelFunc: denseBincount2 }; function depthToSpace2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {blockSize, dataFormat} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; 
+ const { blockSize, dataFormat } = attrs; util_exports.assert(dataFormat === "NHWC", () => `Only NHWC dataFormat supported on CPU for depthToSpace. Got ${dataFormat}`); util_exports.assert(blockSize > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`); const batchSize = x.shape[0]; @@ -38775,9 +38775,9 @@ var depthToSpaceConfig = { kernelFunc: depthToSpace2 }; function depthwiseConv2dNative(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dilations, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dilations, dimRoundingMode } = attrs; assertNotComplex([x, filter], "depthwiseConv2DNative"); const xStrides = util_exports.computeStrides(x.shape); const filterStrides = util_exports.computeStrides(filter.shape); @@ -38787,7 +38787,7 @@ function depthwiseConv2dNative(args) { } util_exports.assert(backend_util_exports.eitherStridesOrDilationsAreOne(strides, $dilations), () => `Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${strides} and dilations '${$dilations}'`); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad3, dimRoundingMode, true); - const {filterHeight, filterWidth, dilationHeight, dilationWidth, padInfo} = convInfo; + const { filterHeight, filterWidth, dilationHeight, dilationWidth, padInfo } = convInfo; const padLeft = padInfo.left; const padTop = padInfo.top; const chMul = convInfo.outChannels / convInfo.inChannels; @@ -38841,12 +38841,12 @@ var depthwiseConv2dNativeConfig = { kernelFunc: depthwiseConv2dNative }; function depthwiseConv2dNativeBackpropFilter2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, dilations, pad: pad3, dimRoundingMode, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, dilations, pad: pad3, dimRoundingMode, filterShape } = attrs; assertNotComplex([x, dy], "depthwiseConv2dNativeBackpropFilter"); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filterShape, strides, dilations, pad3, dimRoundingMode, true); - const {strideHeight, strideWidth, filterHeight, filterWidth} = convInfo; + const { strideHeight, strideWidth, filterHeight, filterWidth } = convInfo; const dW = new TensorBuffer(convInfo.filterShape, "float32"); const leftPad = convInfo.padInfo.left; const topPad = convInfo.padInfo.top; @@ -38886,9 +38886,9 @@ var depthwiseConv2dNativeBackpropFilterConfig = { kernelFunc: depthwiseConv2dNativeBackpropFilter2 }; function depthwiseConv2dNativeBackpropInput2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {strides, dilations, pad: pad3, dimRoundingMode, inputShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { strides, dilations, pad: pad3, dimRoundingMode, inputShape } = attrs; assertNotComplex([dy, filter], "depthwiseConv2DNativeBackpropInput"); const dyStrides = util_exports.computeStrides(dy.shape); const filterStrides = util_exports.computeStrides(filter.shape); @@ -38900,7 +38900,7 @@ function depthwiseConv2dNativeBackpropInput2(args) { const [dyS0, dyS1, dyS2] = dyStrides; const fltValues = backend2.data.get(filter.dataId).values; const [fltS0, fltS1, fltS2] = filterStrides; - const {batchSize, filterHeight, filterWidth, inChannels, inHeight, 
inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth} = convInfo; + const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo; const topPad = filterHeight - 1 - convInfo.padInfo.top; const leftPad = filterWidth - 1 - convInfo.padInfo.left; const chMul = outChannels / inChannels; @@ -38942,8 +38942,8 @@ var depthwiseConv2dNativeBackpropInputConfig = { kernelFunc: depthwiseConv2dNativeBackpropInput2 }; function diag2(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; const xSize = util_exports.sizeFromShape(x.shape); const xVals = backend2.data.get(x.dataId).values; const outBuf = buffer([xSize, xSize], x.dtype); @@ -38962,15 +38962,15 @@ var diagConfig = { var dilation2dConfig = { kernelName: Dilation2D, backendName: "cpu", - kernelFunc: ({inputs, backend: backend2, attrs}) => { - const {x, filter} = inputs; - const {strides, pad: pad3, dilations} = attrs; + kernelFunc: ({ inputs, backend: backend2, attrs }) => { + const { x, filter } = inputs; + const { strides, pad: pad3, dilations } = attrs; const cpuBackend = backend2; const xVals = cpuBackend.data.get(x.dataId).values; const xRank = x.shape.length; const filterVals = cpuBackend.data.get(filter.dataId).values; const filterRank = filter.shape.length; - const {batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape} = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); const outSize = util_exports.sizeFromShape(outShape); const outRank = outShape.length; const outputVals = util_exports.getArrayFromDType(x.dtype, outSize); @@ -39004,19 +39004,19 @@ var dilation2dConfig = { } } const dataId = cpuBackend.write(util_exports.toTypedArray(outputVals, x.dtype), outShape, x.dtype); - return {dataId, shape: outShape, dtype: x.dtype}; + return { dataId, shape: outShape, dtype: x.dtype }; } }; var dilation2dBackpropFilterConfig = { kernelName: Dilation2DBackpropFilter, backendName: "cpu", - kernelFunc: ({inputs, backend: backend2, attrs}) => { - const {x, filter, dy} = inputs; - const {strides, pad: pad3, dilations} = attrs; + kernelFunc: ({ inputs, backend: backend2, attrs }) => { + const { x, filter, dy } = inputs; + const { strides, pad: pad3, dilations } = attrs; const cpuBackend = backend2; const $x = util_exports.toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values); const $filter = util_exports.toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values); - const {batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape} = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", 
dilations); util_exports.assert(dy.rank === outShape.length, () => `Error in ${Dilation2DBackpropFilter}, dy must have the same rank as output ${outShape.length}, but got ${dy.rank}`); const $dy = util_exports.toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values); const gradients = util_exports.makeZerosNestedTypedArray(filter.shape, filter.dtype); @@ -39051,19 +39051,19 @@ var dilation2dBackpropFilterConfig = { } } const dataId = cpuBackend.write(util_exports.toTypedArray(gradients, x.dtype), filter.shape, filter.dtype); - return {dataId, shape: filter.shape, dtype: filter.dtype}; + return { dataId, shape: filter.shape, dtype: filter.dtype }; } }; var dilation2dBackpropInputConfig = { kernelName: Dilation2DBackpropInput, backendName: "cpu", - kernelFunc: ({inputs, backend: backend2, attrs}) => { - const {x, filter, dy} = inputs; - const {strides, pad: pad3, dilations} = attrs; + kernelFunc: ({ inputs, backend: backend2, attrs }) => { + const { x, filter, dy } = inputs; + const { strides, pad: pad3, dilations } = attrs; const cpuBackend = backend2; const $x = util_exports.toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values); const $filter = util_exports.toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values); - const {batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape} = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); util_exports.assert(dy.rank === outShape.length, () => `Error in ${Dilation2DBackpropInput}, dy must have the same rank as output ${outShape.length}, but got ${dy.rank}`); const $dy = util_exports.toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values); const gradients = util_exports.makeZerosNestedTypedArray(x.shape, x.dtype); @@ -39098,19 +39098,19 @@ var dilation2dBackpropInputConfig = { } } const dataId = cpuBackend.write(util_exports.toTypedArray(gradients, x.dtype), x.shape, x.dtype); - return {dataId, shape: x.shape, dtype: x.dtype}; + return { dataId, shape: x.shape, dtype: x.dtype }; } }; function sum3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; assertNotComplex(x, "sum"); let $x; if (x.dtype === "bool") { - $x = cast3({inputs: {x}, backend: backend2, attrs: {dtype: "int32"}}); + $x = cast3({ inputs: { x }, backend: backend2, attrs: { dtype: "int32" } }); } else { - $x = identity2({inputs: {x}, backend: backend2}); + $x = identity2({ inputs: { x }, backend: backend2 }); } const xRank = $x.shape.length; const axes = util_exports.parseAxisParam(axis, $x.shape); @@ -39118,7 +39118,7 @@ function sum3(args) { let reductionAxes = axes; let permutedX = $x; if (permutation != null) { - permutedX = transpose2({inputs: {x: $x}, backend: backend2, attrs: {perm: permutation}}); + permutedX = transpose2({ inputs: { x: $x }, backend: backend2, attrs: { perm: permutation } }); reductionAxes = backend_util_exports.getInnerMostAxes(reductionAxes.length, xRank); } backend_util_exports.assertAxesAreInnerMostDims("sum", 
reductionAxes, permutedX.shape.length); @@ -39139,7 +39139,7 @@ function sum3(args) { if (keepDims) { const newShape = backend_util_exports.expandShapeToKeepDim(result.shape, axes); const oldResult = result; - result = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: newShape}}); + result = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: newShape } }); backend2.disposeIntermediateTensorInfo(oldResult); } backend2.disposeIntermediateTensorInfo($x); @@ -39154,24 +39154,24 @@ var sumConfig = { kernelFunc: sum3 }; function einsum2(args) { - const {inputs, backend: backend2, attrs} = args; - const {equation} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { equation } = attrs; const tensors = inputs; - const {allDims, summedDims, idDims} = backend_util_exports.decodeEinsumEquation(equation, tensors.length); + const { allDims, summedDims, idDims } = backend_util_exports.decodeEinsumEquation(equation, tensors.length); backend_util_exports.checkEinsumDimSizes(allDims.length, idDims, tensors); - const {path, steps} = backend_util_exports.getEinsumComputePath(summedDims, idDims); + const { path, steps } = backend_util_exports.getEinsumComputePath(summedDims, idDims); const nSteps = steps.length; let out = null; let numDimsRemaining = allDims.length; const tensorsToDispose = []; for (let i = 0; i < nSteps; ++i) { for (const idTerm of steps[i]) { - const {permutationIndices: perm, expandDims: dimsToExpand} = backend_util_exports.getEinsumPermutation(numDimsRemaining, idDims[idTerm]); + const { permutationIndices: perm, expandDims: dimsToExpand } = backend_util_exports.getEinsumPermutation(numDimsRemaining, idDims[idTerm]); let x; if (backend_util_exports.isIdentityPermutation(perm)) { x = tensors[idTerm]; } else { - x = transpose2({inputs: {x: tensors[idTerm]}, backend: backend2, attrs: {perm}}); + x = transpose2({ inputs: { x: tensors[idTerm] }, backend: backend2, attrs: { perm } }); tensorsToDispose.push(x); } const targetShape = x.shape.slice(); @@ -39179,20 +39179,20 @@ function einsum2(args) { targetShape.splice(dimsToExpand[k], 0, 1); } if (!util_exports.arraysEqual(x.shape, targetShape)) { - x = reshape3({inputs: {x}, backend: backend2, attrs: {shape: targetShape}}); + x = reshape3({ inputs: { x }, backend: backend2, attrs: { shape: targetShape } }); tensorsToDispose.push(x); } if (out === null) { out = x; } else { - out = multiply2({inputs: {a: x, b: out}, backend: backend2}); + out = multiply2({ inputs: { a: x, b: out }, backend: backend2 }); tensorsToDispose.push(out); } } if (i < nSteps - 1) { if (path[i] >= 0) { out = sum3({ - inputs: {x: out}, + inputs: { x: out }, backend: backend2, attrs: { axis: path[i] - (allDims.length - numDimsRemaining), @@ -39218,8 +39218,8 @@ var einsumConfig = { kernelFunc: einsum2 }; function eluGrad(args) { - const {inputs, backend: backend2} = args; - const {dy, y} = inputs; + const { inputs, backend: backend2 } = args; + const { dy, y } = inputs; assertNotComplex([dy, y], "eluGrad"); const resultValues = new Float32Array(util_exports.sizeFromShape(y.shape)); const values = backend2.data.get(y.dataId).values; @@ -39264,9 +39264,9 @@ var erfConfig = { kernelFunc: erf2 }; function expandDims3(args) { - const {inputs, backend: backend2, attrs} = args; - const {input: input2} = inputs; - const {dim} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { input: input2 } = inputs; + const { dim } = attrs; const inputRank = input2.shape.length; const newShape = input2.shape.slice(); let $dim 
= dim; @@ -39275,7 +39275,7 @@ function expandDims3(args) { $dim = inputRank + dim + 1; } newShape.splice($dim, 0, 1); - return reshape3({inputs: {x: input2}, backend: backend2, attrs: {shape: newShape}}); + return reshape3({ inputs: { x: input2 }, backend: backend2, attrs: { shape: newShape } }); } var expandDimsConfig = { kernelName: ExpandDims, @@ -39302,17 +39302,17 @@ function fftBatch(input2, inverse, cpuBackend) { const resultImag = util_exports.getTypedArrayFromDType("float32", resultSize); for (let b = 0; b < batch; b++) { const r = slice2({ - inputs: {x: real2D}, + inputs: { x: real2D }, backend: cpuBackend, - attrs: {begin: [b, 0], size: [1, innerDim]} + attrs: { begin: [b, 0], size: [1, innerDim] } }); const i = slice2({ - inputs: {x: imag2D}, + inputs: { x: imag2D }, backend: cpuBackend, - attrs: {begin: [b, 0], size: [1, innerDim]} + attrs: { begin: [b, 0], size: [1, innerDim] } }); - const input3 = complex2({inputs: {real: r, imag: i}, backend: cpuBackend}); - const {real: real4, imag: imag4} = fftImpl(input3, inverse, cpuBackend); + const input3 = complex2({ inputs: { real: r, imag: i }, backend: cpuBackend }); + const { real: real4, imag: imag4 } = fftImpl(input3, inverse, cpuBackend); const res = backend_util_exports.mergeRealAndImagArrays(real4, imag4); for (let d = 0; d < innerDim; d++) { const c = backend_util_exports.getComplexWithIndex(res, d); @@ -39325,7 +39325,7 @@ function fftBatch(input2, inverse, cpuBackend) { } const $realInfo = cpuBackend.makeTensorInfo(resultShape, "float32", resultReal); const $imagInfo = cpuBackend.makeTensorInfo(resultShape, "float32", resultImag); - const result = complex2({inputs: {real: $realInfo, imag: $imagInfo}, backend: cpuBackend}); + const result = complex2({ inputs: { real: $realInfo, imag: $imagInfo }, backend: cpuBackend }); cpuBackend.disposeIntermediateTensorInfo($realInfo); cpuBackend.disposeIntermediateTensorInfo($imagInfo); return result; @@ -39342,9 +39342,9 @@ function fftImpl(input2, inverse, cpuBackend) { const realInfo = cpuBackend.makeTensorInfo(resultShape, "float32", result.real); const imagInfo = cpuBackend.makeTensorInfo(resultShape, "float32", result.imag); const sizeInfo = cpuBackend.makeTensorInfo([], "float32", util_exports.createScalarValue(inputSize, "float32")); - const sizeInfoCopy = identity2({inputs: {x: sizeInfo}, backend: cpuBackend}); - const divRealInfo = realDivConfig.kernelFunc({inputs: {a: realInfo, b: sizeInfo}, backend: cpuBackend}); - const divImagInfo = realDivConfig.kernelFunc({inputs: {a: imagInfo, b: sizeInfoCopy}, backend: cpuBackend}); + const sizeInfoCopy = identity2({ inputs: { x: sizeInfo }, backend: cpuBackend }); + const divRealInfo = realDivConfig.kernelFunc({ inputs: { a: realInfo, b: sizeInfo }, backend: cpuBackend }); + const divImagInfo = realDivConfig.kernelFunc({ inputs: { a: imagInfo, b: sizeInfoCopy }, backend: cpuBackend }); const divRealVals = cpuBackend.data.get(divRealInfo.dataId).values; const divImagVals = cpuBackend.data.get(divImagInfo.dataId).values; cpuBackend.disposeIntermediateTensorInfo(realInfo); @@ -39353,7 +39353,7 @@ function fftImpl(input2, inverse, cpuBackend) { cpuBackend.disposeIntermediateTensorInfo(sizeInfoCopy); cpuBackend.disposeIntermediateTensorInfo(divRealInfo); cpuBackend.disposeIntermediateTensorInfo(divImagInfo); - return {real: divRealVals, imag: divImagVals}; + return { real: divRealVals, imag: divImagVals }; } return result; } else { @@ -39367,7 +39367,7 @@ function isExponentOf2(size) { } function fftRadix2(realVals, imagVals, size, 
inverse, cpuBackend) { if (size === 1) { - return {real: realVals, imag: imagVals}; + return { real: realVals, imag: imagVals }; } const data = backend_util_exports.mergeRealAndImagArrays(realVals, imagVals); const half = size / 2; @@ -39377,14 +39377,14 @@ function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) { const evenShape = [evenRealVals.length]; const evenRealInfo = cpuBackend.makeTensorInfo(evenShape, "float32", evenRealVals); const evenImagInfo = cpuBackend.makeTensorInfo(evenShape, "float32", evenImagVals); - const evenTensorInfo = complex2({inputs: {real: evenRealInfo, imag: evenImagInfo}, backend: cpuBackend}); + const evenTensorInfo = complex2({ inputs: { real: evenRealInfo, imag: evenImagInfo }, backend: cpuBackend }); const oddComplex = backend_util_exports.complexWithOddIndex(data); const oddRealVals = oddComplex.real; const oddImagVals = oddComplex.imag; const oddShape = [oddRealVals.length]; const oddRealInfo = cpuBackend.makeTensorInfo(oddShape, "float32", oddRealVals); const oddImagInfo = cpuBackend.makeTensorInfo(oddShape, "float32", oddImagVals); - const oddTensorInfo = complex2({inputs: {real: oddRealInfo, imag: oddImagInfo}, backend: cpuBackend}); + const oddTensorInfo = complex2({ inputs: { real: oddRealInfo, imag: oddImagInfo }, backend: cpuBackend }); const $evenComplex = fftRadix2(evenRealVals, evenImagVals, half, inverse, cpuBackend); const $evenRealVals = $evenComplex.real; const $evenImagVals = $evenComplex.imag; @@ -39392,7 +39392,7 @@ function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) { const $evenRealInfo = cpuBackend.makeTensorInfo($evenShape, "float32", $evenRealVals); const $evenImagInfo = cpuBackend.makeTensorInfo($evenShape, "float32", $evenImagVals); const $evenTensorInfo = complex2({ - inputs: {real: $evenRealInfo, imag: $evenImagInfo}, + inputs: { real: $evenRealInfo, imag: $evenImagInfo }, backend: cpuBackend }); const $oddComplex = fftRadix2(oddRealVals, oddImagVals, half, inverse, cpuBackend); @@ -39401,34 +39401,34 @@ function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) { const $oddShape = [$oddRealVals.length]; const $oddRealInfo = cpuBackend.makeTensorInfo($oddShape, "float32", $oddRealVals); const $oddImagInfo = cpuBackend.makeTensorInfo($oddShape, "float32", $oddImagVals); - const $oddTensorInfo = complex2({inputs: {real: $oddRealInfo, imag: $oddImagInfo}, backend: cpuBackend}); + const $oddTensorInfo = complex2({ inputs: { real: $oddRealInfo, imag: $oddImagInfo }, backend: cpuBackend }); const e = backend_util_exports.exponents(size, inverse); const eShape = [e.real.length]; const eRealInfo = cpuBackend.makeTensorInfo(eShape, "float32", e.real); const eImagInfo = cpuBackend.makeTensorInfo(eShape, "float32", e.imag); - const complexInfo = complex2({inputs: {real: eRealInfo, imag: eImagInfo}, backend: cpuBackend}); - const exponentInfo = multiply2({inputs: {a: complexInfo, b: $oddTensorInfo}, backend: cpuBackend}); + const complexInfo = complex2({ inputs: { real: eRealInfo, imag: eImagInfo }, backend: cpuBackend }); + const exponentInfo = multiply2({ inputs: { a: complexInfo, b: $oddTensorInfo }, backend: cpuBackend }); const addPart = add4({ - inputs: {a: $evenTensorInfo, b: exponentInfo}, + inputs: { a: $evenTensorInfo, b: exponentInfo }, backend: cpuBackend }); const subPart = sub2({ - inputs: {a: $evenTensorInfo, b: exponentInfo}, + inputs: { a: $evenTensorInfo, b: exponentInfo }, backend: cpuBackend }); - const addPartReal = real2({inputs: {input: addPart}, backend: cpuBackend}); - const 
subPartReal = real2({inputs: {input: subPart}, backend: cpuBackend}); - const addPartImag = imag2({inputs: {input: addPart}, backend: cpuBackend}); - const subPartImag = imag2({inputs: {input: subPart}, backend: cpuBackend}); + const addPartReal = real2({ inputs: { input: addPart }, backend: cpuBackend }); + const subPartReal = real2({ inputs: { input: subPart }, backend: cpuBackend }); + const addPartImag = imag2({ inputs: { input: addPart }, backend: cpuBackend }); + const subPartImag = imag2({ inputs: { input: subPart }, backend: cpuBackend }); const $real = concat2({ inputs: [addPartReal, subPartReal], backend: cpuBackend, - attrs: {axis: 0} + attrs: { axis: 0 } }); const $imag = concat2({ inputs: [addPartImag, subPartImag], backend: cpuBackend, - attrs: {axis: 0} + attrs: { axis: 0 } }); const $realVals = cpuBackend.data.get($real.dataId).values; const $imagVals = cpuBackend.data.get($imag.dataId).values; @@ -39456,7 +39456,7 @@ function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) { cpuBackend.disposeIntermediateTensorInfo(subPartImag); cpuBackend.disposeIntermediateTensorInfo($real); cpuBackend.disposeIntermediateTensorInfo($imag); - return {real: $realVals, imag: $imagVals}; + return { real: $realVals, imag: $imagVals }; } function fourierTransformByMatmul(data, size, inverse) { const ret = new Float32Array(size * 2); @@ -39478,18 +39478,18 @@ function fourierTransformByMatmul(data, size, inverse) { return ret; } function fft2(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const inputSize = util_exports.sizeFromShape(input2.shape); const innerDimensionSize = input2.shape[input2.shape.length - 1]; const batch = inputSize / innerDimensionSize; const input2D = reshape3({ - inputs: {x: input2}, + inputs: { x: input2 }, backend: backend2, - attrs: {shape: [batch, innerDimensionSize]} + attrs: { shape: [batch, innerDimensionSize] } }); const result = fftBatch(input2D, false, backend2); - const resultReshaped = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: input2.shape}}); + const resultReshaped = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: input2.shape } }); backend2.disposeIntermediateTensorInfo(input2D); backend2.disposeIntermediateTensorInfo(result); return resultReshaped; @@ -39500,8 +39500,8 @@ var fftConfig = { kernelFunc: fft2 }; function fill2(args) { - const {backend: backend2, attrs} = args; - const {shape, value, dtype} = attrs; + const { backend: backend2, attrs } = args; + const { shape, value, dtype } = attrs; const $dtype = dtype || util_exports.inferDtype(value); const values = util_exports.getArrayFromDType($dtype, util_exports.sizeFromShape(shape)); fillValues(values, value, $dtype); @@ -39522,8 +39522,8 @@ function fillValues(values, value, dtype) { var flipLeftRightConfig = { kernelName: FlipLeftRight, backendName: "cpu", - kernelFunc: ({inputs, attrs, backend: backend2}) => { - const {image: image3} = inputs; + kernelFunc: ({ inputs, attrs, backend: backend2 }) => { + const { image: image3 } = inputs; const cpuBackend = backend2; const output = util_exports.getTypedArrayFromDType(image3.dtype, util_exports.sizeFromShape(image3.shape)); const [batch, imageHeight, imageWidth, numChannels] = image3.shape; @@ -39551,7 +39551,7 @@ var flipLeftRightConfig = { } } const dataId = cpuBackend.write(output, image3.shape, image3.dtype); - return {dataId, shape: image3.shape, dtype: image3.dtype}; + return 
{ dataId, shape: image3.shape, dtype: image3.dtype }; } }; var floorDivImpl = createSimpleBinaryKernelImpl((a, b) => Math.floor(a / b)); @@ -39562,17 +39562,17 @@ var floorDivConfig = { kernelFunc: floorDiv2 }; function fusedConv2D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; let result = conv2D({ - inputs: {x, filter}, + inputs: { x, filter }, backend: backend2, - attrs: {strides, pad: pad3, dataFormat, dilations, dimRoundingMode} + attrs: { strides, pad: pad3, dataFormat, dilations, dimRoundingMode } }); if (bias) { const resultOld = result; - result = add4({inputs: {a: result, b: bias}, backend: backend2}); + result = add4({ inputs: { a: result, b: bias }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(resultOld); } if (activation2) { @@ -39588,17 +39588,17 @@ var fusedConv2DConfig = { kernelFunc: fusedConv2D }; function fusedDepthwiseConv2D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; let result = depthwiseConv2dNative({ - inputs: {x, filter}, + inputs: { x, filter }, backend: backend2, - attrs: {strides, pad: pad3, dataFormat, dilations, dimRoundingMode} + attrs: { strides, pad: pad3, dataFormat, dilations, dimRoundingMode } }); if (bias) { const oldResult = result; - result = add4({inputs: {a: result, b: bias}, backend: backend2}); + result = add4({ inputs: { a: result, b: bias }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(oldResult); } if (activation2) { @@ -39614,8 +39614,8 @@ var fusedDepthwiseConv2DConfig = { kernelFunc: fusedDepthwiseConv2D }; function gatherNd(args) { - const {inputs, backend: backend2} = args; - const {params, indices} = inputs; + const { inputs, backend: backend2 } = args; + const { params, indices } = inputs; const paramsSize = util_exports.sizeFromShape(params.shape); const indicesShape = indices.shape; const sliceRank = indicesShape[indicesShape.length - 1]; @@ -39649,9 +39649,9 @@ var gatherNdConfig = { kernelFunc: gatherNd }; function gatherV2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, indices} = inputs; - const {axis, batchDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, indices } = inputs; + const { axis, batchDims } = attrs; assertNotComplex([x, indices], "gatherV2"); let $batchDims = batchDims; if (batchDims == null) { @@ -39661,7 +39661,7 @@ function gatherV2(args) { const parsedAxis = util_exports.parseAxisParam(axis, x.shape)[0]; const shapeInfo = backend_util_exports.segment_util.collectGatherOpShapeInfo(x, indices, parsedAxis, $batchDims); const flattenX = reshape3({ - inputs: {x}, + inputs: { x }, backend: backend2, attrs: { shape: [ @@ -39673,9 +39673,9 @@ function gatherV2(args) { } }); const flattenIndex = 
reshape3({ - inputs: {x: indices}, + inputs: { x: indices }, backend: backend2, - attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]} + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] } }); const flattenOutputShape = [ shapeInfo.batchSize, @@ -39703,18 +39703,18 @@ var greaterEqualConfig = { kernelFunc: greaterEqual2 }; function ifft2(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const inputSize = util_exports.sizeFromShape(input2.shape); const innerDimensionSize = input2.shape[input2.shape.length - 1]; const batch = inputSize / innerDimensionSize; const input2D = reshape3({ - inputs: {x: input2}, + inputs: { x: input2 }, backend: backend2, - attrs: {shape: [batch, innerDimensionSize]} + attrs: { shape: [batch, innerDimensionSize] } }); const result = fftBatch(input2D, true, backend2); - const resultReshaped = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: input2.shape}}); + const resultReshaped = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: input2.shape } }); backend2.disposeIntermediateTensorInfo(input2D); backend2.disposeIntermediateTensorInfo(result); return resultReshaped; @@ -39750,8 +39750,8 @@ var lessEqualConfig = { kernelFunc: lessEqual2 }; function linSpace(args) { - const {backend: backend2, attrs} = args; - const {start, stop, num} = attrs; + const { backend: backend2, attrs } = args; + const { start, stop, num } = attrs; const outVals = linSpaceImpl(start, stop, num); return backend2.makeTensorInfo([outVals.length], "float32", outVals); } @@ -39787,9 +39787,9 @@ var logicalOrConfig = { kernelFunc: logicalOr2 }; function lRN(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {depthRadius, bias, alpha, beta} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; assertNotComplex(x, "LRN"); const channels = x.shape[3]; const maxD = channels - 1; @@ -39820,9 +39820,9 @@ var lRNConfig = { kernelFunc: lRN }; function lRNGrad(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, y, dy} = inputs; - const {depthRadius, bias, alpha, beta} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, y, dy } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; assertNotComplex(dy, "LRNGrad"); const dySize = util_exports.sizeFromShape(dy.shape); const channels = dy.shape[3]; @@ -39857,9 +39857,9 @@ var lRNGradConfig = { kernelFunc: lRNGrad }; function max3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {reductionIndices, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { reductionIndices, keepDims } = attrs; const cpuBackend = backend2; let xShape = x.shape; const xRank = xShape.length; @@ -39887,7 +39887,7 @@ function max3(args) { const newShape = backend_util_exports.expandShapeToKeepDim(maxOutShape, origAxes); outShape = newShape; } - return {dataId, shape: outShape, dtype: x.dtype}; + return { dataId, shape: outShape, dtype: x.dtype }; } var maxConfig = { kernelName: Max, @@ -39895,16 +39895,16 @@ var maxConfig = { kernelFunc: max3 }; function maxPool2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; 
assertNotComplex(x, "maxPool"); - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const dilations = 1; util_exports.assert(backend_util_exports.eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in maxPool: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); let res; if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && util_exports.arraysEqual(convInfo.inShape, convInfo.outShape)) { - res = identity2({inputs: {x}, backend: backend2}); + res = identity2({ inputs: { x }, backend: backend2 }); } else { const xValues = backend2.data.get(x.dataId).values; const strides2 = util_exports.computeStrides(x.shape); @@ -39919,9 +39919,9 @@ var maxPoolConfig = { kernelFunc: maxPool2 }; function maxPool3D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {filterSize, strides, pad: pad3, dimRoundingMode, dataFormat} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad: pad3, dimRoundingMode, dataFormat } = attrs; assertNotComplex(x, "maxPool3d"); const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode, dataFormat); const xValues = backend2.data.get(x.dataId).values; @@ -39934,9 +39934,9 @@ var maxPool3DConfig = { kernelFunc: maxPool3D }; function maxPool3DGrad(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; assertNotComplex([dy, input2], "maxPool3DGrad"); const convInfo = backend_util_exports.computePool3DInfo(input2.shape, filterSize, strides, 1, pad3, dimRoundingMode); const inputBuf = backend2.bufferSync(input2); @@ -40004,11 +40004,11 @@ var maxPool3DGradConfig2 = { kernelFunc: maxPool3DGrad }; function maxPoolGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2, output} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2, output } = inputs; const x = input2; assertNotComplex([input2, output], "maxPoolGrad"); - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode); const xValues = backend2.data.get(x.dataId).values; const maxPosBuf = buffer(convInfo.outShape, x.dtype, maxPoolPositions(xValues, x.shape, x.dtype, convInfo).values); @@ -40071,9 +40071,9 @@ function maxPoolWithArgmaxImpl(xValues, xShape, dtype, includeBatchInIndex, conv var maxPoolWithArgmaxConfig = { kernelName: MaxPoolWithArgmax, backendName: "cpu", - kernelFunc: ({inputs, attrs, backend: backend2}) => { - const {x} = inputs; - const {filterSize, strides, pad: pad3, includeBatchInIndex} = attrs; + kernelFunc: ({ inputs, attrs, backend: backend2 }) => { + const { x } = inputs; + const { filterSize, strides, pad: pad3, includeBatchInIndex } = attrs; const cpuBackend = backend2; assertNotComplex(x, "MaxPoolWithArgmax"); const values = cpuBackend.data.get(x.dataId).values; 
@@ -40082,15 +40082,15 @@ var maxPoolWithArgmaxConfig = { const pooledDataId = cpuBackend.write(pooled, convInfo.outShape, x.dtype); const indexesDataId = cpuBackend.write(indexes, convInfo.outShape, x.dtype); return [ - {dataId: pooledDataId, shape: convInfo.outShape, dtype: x.dtype}, - {dataId: indexesDataId, shape: convInfo.outShape, dtype: "int32"} + { dataId: pooledDataId, shape: convInfo.outShape, dtype: x.dtype }, + { dataId: indexesDataId, shape: convInfo.outShape, dtype: "int32" } ]; } }; function mean2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; const axes = util_exports.parseAxisParam(axis, x.shape); const shapes = backend_util_exports.computeOutAndReduceShapes(x.shape, axes); const reduceShape = shapes[1]; @@ -40098,11 +40098,11 @@ function mean2(args) { const toDispose = []; const reduceSizeScalar = backend2.makeTensorInfo([], "float32", new Float32Array([reduceSize])); toDispose.push(reduceSizeScalar); - const $x = cast3({inputs: {x}, backend: backend2, attrs: {dtype: "float32"}}); + const $x = cast3({ inputs: { x }, backend: backend2, attrs: { dtype: "float32" } }); toDispose.push($x); - const res = div2({inputs: {a: $x, b: reduceSizeScalar}, backend: backend2}); + const res = div2({ inputs: { a: $x, b: reduceSizeScalar }, backend: backend2 }); toDispose.push(res); - const result = sum3({inputs: {x: res}, backend: backend2, attrs: {axis, keepDims}}); + const result = sum3({ inputs: { x: res }, backend: backend2, attrs: { axis, keepDims } }); toDispose.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return result; } @@ -40112,16 +40112,16 @@ var meanConfig = { kernelFunc: mean2 }; function min3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; assertNotComplex(x, "min"); const origAxes = util_exports.parseAxisParam(axis, x.shape); let axes = origAxes; const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length); let $x = x; if (permutedAxes != null) { - $x = transpose2({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + $x = transpose2({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); axes = backend_util_exports.getInnerMostAxes(axes.length, x.shape.length); } backend_util_exports.assertAxesAreInnerMostDims("min", axes, $x.shape.length); @@ -40146,7 +40146,7 @@ function min3(args) { const result = backend2.makeTensorInfo(outShape, $x.dtype, vals); if (keepDims) { const expandedShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes); - const reshapedResult = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: expandedShape}}); + const reshapedResult = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: expandedShape } }); backend2.disposeIntermediateTensorInfo(result); return reshapedResult; } @@ -40158,9 +40158,9 @@ var minConfig = { kernelFunc: min3 }; function mirrorPad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {paddings, mode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { paddings, mode } = attrs; assertNotComplex(x, "mirrorPad"); const outShape = paddings.map((p2, i) => p2[0] + x.shape[i] + p2[1]); const start = 
paddings.map((p2) => p2[0]); @@ -40187,7 +40187,7 @@ function mirrorPad2(args) { resVals[i] = xVals[inIndex]; } const outId = backend2.write(resVals, outShape, x.dtype); - return {dataId: outId, shape: outShape, dtype: x.dtype}; + return { dataId: outId, shape: outShape, dtype: x.dtype }; } var mirrorPadConfig = { kernelName: MirrorPad, @@ -40210,9 +40210,9 @@ var modConfig = { }; var seedrandom4 = __toModule(require_seedrandom2()); function softmax3(args) { - const {inputs, backend: backend2, attrs} = args; - const {logits} = inputs; - const {dim} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { logits } = inputs; + const { dim } = attrs; const logitsRank = logits.shape.length; let $dim = dim; if ($dim === -1) { @@ -40223,17 +40223,17 @@ function softmax3(args) { } const axes = util_exports.parseAxisParam([$dim], logits.shape); const maxLogit = max3({ - inputs: {x: logits}, + inputs: { x: logits }, backend: backend2, - attrs: {reductionIndices: axes, keepDims: false} + attrs: { reductionIndices: axes, keepDims: false } }); const expandedShape = backend_util_exports.expandShapeToKeepDim(maxLogit.shape, axes); - const maxLogitReshaped = reshape3({inputs: {x: maxLogit}, backend: backend2, attrs: {shape: expandedShape}}); - const a = sub2({inputs: {a: logits, b: maxLogitReshaped}, backend: backend2}); - const b = exp2({inputs: {x: a}, backend: backend2}); - const sumExp = sum3({inputs: {x: b}, backend: backend2, attrs: {axis: axes, keepDims: false}}); - const sumReshaped = reshape3({inputs: {x: sumExp}, backend: backend2, attrs: {shape: expandedShape}}); - const result = div2({inputs: {a: b, b: sumReshaped}, backend: backend2}); + const maxLogitReshaped = reshape3({ inputs: { x: maxLogit }, backend: backend2, attrs: { shape: expandedShape } }); + const a = sub2({ inputs: { a: logits, b: maxLogitReshaped }, backend: backend2 }); + const b = exp2({ inputs: { x: a }, backend: backend2 }); + const sumExp = sum3({ inputs: { x: b }, backend: backend2, attrs: { axis: axes, keepDims: false } }); + const sumReshaped = reshape3({ inputs: { x: sumExp }, backend: backend2, attrs: { shape: expandedShape } }); + const result = div2({ inputs: { a: b, b: sumReshaped }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(maxLogit); backend2.disposeIntermediateTensorInfo(maxLogitReshaped); backend2.disposeIntermediateTensorInfo(a); @@ -40248,11 +40248,11 @@ var softmaxConfig = { kernelFunc: softmax3 }; function multinomial2(args) { - const {inputs, backend: backend2, attrs} = args; - const {logits} = inputs; - const {numSamples, seed, normalized} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { logits } = inputs; + const { numSamples, seed, normalized } = attrs; assertNotComplex(logits, "multinomial"); - const probabilities = normalized ? logits : softmax3({inputs: {logits}, backend: backend2, attrs: {dim: -1}}); + const probabilities = normalized ? 
logits : softmax3({ inputs: { logits }, backend: backend2, attrs: { dim: -1 } });
   const batchSize = probabilities.shape[0];
   const numEvents = probabilities.shape[1];
   const probVals = backend2.data.get(probabilities.dataId).values;
@@ -40290,13 +40290,13 @@ var multinomialConfig = {
 };
 var nonMaxSuppressionV3Impl2 = kernel_impls_exports.nonMaxSuppressionV3Impl;
 function nonMaxSuppressionV3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {boxes, scores} = inputs;
-  const {maxOutputSize, iouThreshold, scoreThreshold} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { boxes, scores } = inputs;
+  const { maxOutputSize, iouThreshold, scoreThreshold } = attrs;
   assertNotComplex(boxes, "NonMaxSuppression");
   const boxesVals = backend2.data.get(boxes.dataId).values;
   const scoresVals = backend2.data.get(scores.dataId).values;
-  const {selectedIndices} = nonMaxSuppressionV3Impl2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);
+  const { selectedIndices } = nonMaxSuppressionV3Impl2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);
   return backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices));
 }
 var nonMaxSuppressionV3Config = {
@@ -40306,13 +40306,13 @@ var nonMaxSuppressionV3Config = {
 };
 var nonMaxSuppressionV4Impl2 = kernel_impls_exports.nonMaxSuppressionV4Impl;
 function nonMaxSuppressionV4(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {boxes, scores} = inputs;
-  const {maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { boxes, scores } = inputs;
+  const { maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize } = attrs;
   assertNotComplex(boxes, "NonMaxSuppressionPadded");
   const boxesVals = backend2.data.get(boxes.dataId).values;
   const scoresVals = backend2.data.get(scores.dataId).values;
-  const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize);
+  const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize);
   return [
     backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices)),
     backend2.makeTensorInfo([], "int32", new Int32Array([validOutputs]))
@@ -40325,9 +40325,9 @@ var nonMaxSuppressionV4Config = {
 };
 var nonMaxSuppressionV5Impl2 = kernel_impls_exports.nonMaxSuppressionV5Impl;
 function nonMaxSuppressionV5(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {boxes, scores} = inputs;
-  const {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { boxes, scores } = inputs;
+  const { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = attrs;
   assertNotComplex(boxes, "NonMaxSuppressionWithScore");
   const boxesVals = backend2.data.get(boxes.dataId).values;
   const scoresVals = backend2.data.get(scores.dataId).values;
@@ -40335,7 +40335,7 @@ function nonMaxSuppressionV5(args) {
   const iouThresholdVal = iouThreshold;
   const scoreThresholdVal = scoreThreshold;
   const softNmsSigmaVal = softNmsSigma;
-  const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl2(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal);
+  const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl2(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal);
   return [
     backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices)),
     backend2.makeTensorInfo([selectedScores.length], "float32", new Float32Array(selectedScores))
@@ -40347,9 +40347,9 @@ var nonMaxSuppressionV5Config = {
   kernelFunc: nonMaxSuppressionV5
 };
 function oneHot2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {indices} = inputs;
-  const {depth, onValue, offValue} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { indices } = inputs;
+  const { depth, onValue, offValue } = attrs;
   assertNotComplex(indices, "oneHot");
   const indicesSize = util_exports.sizeFromShape(indices.shape);
   const res = new Float32Array(indicesSize * depth);
@@ -40368,23 +40368,23 @@ var oneHotConfig = {
   kernelFunc: oneHot2
 };
 function zerosLike2(args) {
-  const {inputs, backend: backend2} = args;
-  const {x} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { x } = inputs;
   if (x.dtype === "string") {
     throw new Error("zerosLike is not supported for string tensors");
   } else if (x.dtype === "complex64") {
-    const realPart = real2({inputs: {input: x}, backend: backend2});
-    const r = zerosLike2({inputs: {x: realPart}, backend: backend2});
-    const imagPart = imag2({inputs: {input: x}, backend: backend2});
-    const i = zerosLike2({inputs: {x: imagPart}, backend: backend2});
-    const result = complex2({inputs: {real: r, imag: i}, backend: backend2});
+    const realPart = real2({ inputs: { input: x }, backend: backend2 });
+    const r = zerosLike2({ inputs: { x: realPart }, backend: backend2 });
+    const imagPart = imag2({ inputs: { input: x }, backend: backend2 });
+    const i = zerosLike2({ inputs: { x: imagPart }, backend: backend2 });
+    const result = complex2({ inputs: { real: r, imag: i }, backend: backend2 });
     backend2.disposeIntermediateTensorInfo(realPart);
     backend2.disposeIntermediateTensorInfo(r);
     backend2.disposeIntermediateTensorInfo(imagPart);
     backend2.disposeIntermediateTensorInfo(i);
     return result;
   } else {
-    return fill2({backend: backend2, attrs: {shape: x.shape, value: 0, dtype: x.dtype}});
+    return fill2({ backend: backend2, attrs: { shape: x.shape, value: 0, dtype: x.dtype } });
   }
 }
 var zerosLikeConfig = {
@@ -40393,23 +40393,23 @@ var zerosLikeConfig = {
   kernelFunc: zerosLike2
 };
 function onesLike2(args) {
-  const {inputs, backend: backend2} = args;
-  const {x} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { x } = inputs;
   if (x.dtype === "string") {
     throw new Error("onesLike is not supported for string tensors");
   } else if (x.dtype === "complex64") {
-    const realPart = real2({inputs: {input: x}, backend: backend2});
-    const r = onesLike2({inputs: {x: realPart}, backend: backend2});
-    const imagPart = imag2({inputs: {input: x}, backend: backend2});
-    const i = zerosLike2({inputs: {x: imagPart}, backend: backend2});
-    const result = complex2({inputs: {real: r, imag: i}, backend: backend2});
+    const realPart = real2({ inputs: { input: x }, backend: backend2 });
+    const r = onesLike2({ inputs: { x: realPart }, backend: backend2 });
+    const imagPart = imag2({ inputs: { input: x }, backend: backend2 });
+    const i = zerosLike2({ inputs: { x: imagPart }, backend: backend2 });
+    const result = complex2({ inputs: { real: r, imag: i }, backend: backend2 });
     backend2.disposeIntermediateTensorInfo(realPart);
     backend2.disposeIntermediateTensorInfo(r);
     backend2.disposeIntermediateTensorInfo(imagPart);
     backend2.disposeIntermediateTensorInfo(i);
     return result;
   } else {
-    return fill2({backend: backend2, attrs: {shape: x.shape, value: 1, dtype: x.dtype}});
+    return fill2({ backend: backend2, attrs: { shape: x.shape, value: 1, dtype: x.dtype } });
   }
 }
 var onesLikeConfig = {
@@ -40418,10 +40418,10 @@ var onesLikeConfig = {
   kernelFunc: onesLike2
 };
 function pack(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {axis} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { axis } = attrs;
   if (inputs.length === 1) {
-    return expandDims3({inputs: {input: inputs[0]}, backend: backend2, attrs: {dim: axis}});
+    return expandDims3({ inputs: { input: inputs[0] }, backend: backend2, attrs: { dim: axis } });
   }
   const shape = inputs[0].shape;
   const dtype = inputs[0].dtype;
@@ -40431,11 +40431,11 @@ function pack(args) {
   });
   const intermediateTensorInfos = [];
   const expandedTensors = inputs.map((t) => {
-    const expandedT = expandDims3({inputs: {input: t}, backend: backend2, attrs: {dim: axis}});
+    const expandedT = expandDims3({ inputs: { input: t }, backend: backend2, attrs: { dim: axis } });
     intermediateTensorInfos.push(expandedT);
     return expandedT;
   });
-  const result = concat2({inputs: expandedTensors, backend: backend2, attrs: {axis}});
+  const result = concat2({ inputs: expandedTensors, backend: backend2, attrs: { axis } });
   intermediateTensorInfos.forEach((t) => backend2.disposeIntermediateTensorInfo(t));
   return result;
 }
@@ -40445,9 +40445,9 @@ var packConfig = {
   kernelFunc: pack
 };
 function padV2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {paddings, constantValue} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { paddings, constantValue } = attrs;
   assertNotComplex(x, "pad");
   const outShape = paddings.map((p2, i) => p2[0] + x.shape[i] + p2[1]);
   const start = paddings.map((p2) => p2[0]);
@@ -40469,7 +40469,7 @@ function padV2(args) {
     resVals[outIndex] = xVals[i];
   }
   const outId = backend2.write(resVals, outShape, x.dtype);
-  return {dataId: outId, shape: outShape, dtype: x.dtype};
+  return { dataId: outId, shape: outShape, dtype: x.dtype };
 }
 var padV2Config = {
   kernelName: PadV2,
@@ -40484,8 +40484,8 @@ var powConfig = {
   kernelFunc: pow2
 };
 function range3(args) {
-  const {backend: backend2, attrs} = args;
-  const {start, stop, dtype, step: step5} = attrs;
+  const { backend: backend2, attrs } = args;
+  const { start, stop, dtype, step: step5 } = attrs;
   const values = rangeImpl(start, stop, step5, dtype);
   return backend2.makeTensorInfo([values.length], dtype, values);
 }
@@ -40501,9 +40501,9 @@ var reciprocalConfig = {
   kernelFunc: reciprocal2
 };
 function resizeBilinear2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {images} = inputs;
-  const {alignCorners, halfPixelCenters, size} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { images } = inputs;
+  const { alignCorners, halfPixelCenters, size } = attrs;
   assertNotComplex(images, "resizeBilinear");
   const imagesStrides = util_exports.computeStrides(images.shape);
   const [newHeight, newWidth] = size;
@@ -40569,9 +40569,9 @@ var resizeBilinearConfig = {
   kernelFunc: resizeBilinear2
 };
 function resizeBilinearGrad(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {images, dy} = inputs;
-  const {alignCorners} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { images, dy } = inputs;
+  const { alignCorners } = attrs;
   assertNotComplex([dy, images], "resizeBilinearGrad");
   const imagesStrides = util_exports.computeStrides(images.shape);
   const [batch, xHeight, xWidth, depth] = images.shape;
@@ -40631,9 +40631,9 @@ var resizeBilinearGradConfig2 = {
   kernelFunc: resizeBilinearGrad
 };
 function resizeNearestNeighbor2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {images} = inputs;
-  const {alignCorners, halfPixelCenters, size} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { images } = inputs;
+  const { alignCorners, halfPixelCenters, size } = attrs;
   assertNotComplex(images, "resizeNearestNeighbor");
   const imagesStrides = util_exports.computeStrides(images.shape);
   const [newHeight, newWidth] = size;
@@ -40682,9 +40682,9 @@ var resizeNearestNeighborConfig = {
   kernelFunc: resizeNearestNeighbor2
 };
 function resizeNearestNeighborGrad(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {images, dy} = inputs;
-  const {alignCorners} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { images, dy } = inputs;
+  const { alignCorners } = attrs;
   assertNotComplex([dy, images], "resizeNearestNeighborGrad");
   const imagesStrides = util_exports.computeStrides(images.shape);
   const dyStrides = util_exports.computeStrides(dy.shape);
@@ -40755,14 +40755,14 @@ var resizeNearestNeighborGradConfig2 = {
   kernelFunc: resizeNearestNeighborGrad
 };
 function reverse2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {dims} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { dims } = attrs;
   assertNotComplex(x, "reverse");
   const xRank = x.shape.length;
   const $dims = util_exports.parseAxisParam(dims, x.shape);
   if (xRank === 0) {
-    return identity2({inputs: {x}, backend: backend2});
+    return identity2({ inputs: { x }, backend: backend2 });
   }
   const outBuf = new TensorBuffer(x.shape, x.dtype);
   const xBuf = backend2.bufferSync(x);
@@ -40782,9 +40782,9 @@ var reverseConfig = {
 var rotateWithOffsetConfig = {
   kernelName: RotateWithOffset,
   backendName: "cpu",
-  kernelFunc: ({inputs, attrs, backend: backend2}) => {
-    const {image: image3} = inputs;
-    const {radians, fillValue, center} = attrs;
+  kernelFunc: ({ inputs, attrs, backend: backend2 }) => {
+    const { image: image3 } = inputs;
+    const { radians, fillValue, center } = attrs;
     const cpuBackend = backend2;
     const output = util_exports.getTypedArrayFromDType(image3.dtype, util_exports.sizeFromShape(image3.shape));
     const [batch, imageHeight, imageWidth, numChannels] = image3.shape;
@@ -40828,7 +40828,7 @@ var rotateWithOffsetConfig = {
       }
     }
     const dataId = cpuBackend.write(output, image3.shape, image3.dtype);
-    return {dataId, shape: image3.shape, dtype: image3.dtype};
+    return { dataId, shape: image3.shape, dtype: image3.dtype };
   }
 };
 var round3 = unaryKernelFunc(Round, (xi) => {
@@ -40881,10 +40881,10 @@ function scatterImpl(indices, updates, shape, outputSize, sliceSize, numUpdates,
   return outBuf;
 }
 function scatterNd(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {indices, updates} = inputs;
-  const {shape} = attrs;
-  const {sliceRank, numUpdates, sliceSize, strides, outputSize} = backend_util_exports.calculateShapes(updates, indices, shape);
+  const { inputs, backend: backend2, attrs } = args;
+  const { indices, updates } = inputs;
+  const { shape } = attrs;
+  const { sliceRank, numUpdates, sliceSize, strides, outputSize } = backend_util_exports.calculateShapes(updates, indices, shape);
   const sumDupeIndices = true;
   const indicesBuf = backend2.bufferSync(indices);
   const updatesBuf = backend2.bufferSync(updates);
@@ -40897,8 +40897,8 @@ var scatterNdConfig = {
   kernelFunc: scatterNd
 };
 function select(args) {
-  const {inputs, backend: backend2} = args;
-  const {condition, t, e} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { condition, t, e } = inputs;
   assertNotComplex([condition, t, e], "select");
   const conditionRank = condition.shape.length;
   const values = backend2.data.get(condition.dataId).values;
@@ -40986,9 +40986,9 @@ var softplusConfig = {
   kernelFunc: softplus2
 };
 function spaceToBatchND2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {blockShape, paddings} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { blockShape, paddings } = attrs;
   assertNotComplex([x], "spaceToBatchND");
   const prod5 = util_exports.sizeFromShape(blockShape);
   const completePaddings = [[0, 0]];
@@ -40997,22 +40997,22 @@ function spaceToBatchND2(args) {
     completePaddings.push([0, 0]);
   }
   const paddedX = padV2Config.kernelFunc({
-    inputs: {x},
+    inputs: { x },
     backend: backend2,
-    attrs: {paddings: completePaddings, constantValue: 0}
+    attrs: { paddings: completePaddings, constantValue: 0 }
   });
   const reshapedPaddedShape = backend_util_exports.getReshaped(paddedX.shape, blockShape, prod5, false);
   const permutedReshapedPaddedPermutation = backend_util_exports.getPermuted(reshapedPaddedShape.length, blockShape.length, false);
   const flattenShape = backend_util_exports.getReshapedPermuted(paddedX.shape, blockShape, prod5, false);
-  const reshapeInputs = {x: paddedX};
-  const reshapeAttrs = {shape: reshapedPaddedShape};
-  const paddedXReshaped = reshape3({inputs: reshapeInputs, backend: backend2, attrs: reshapeAttrs});
-  const transposeInputs = {x: paddedXReshaped};
-  const transposeAttrs = {perm: permutedReshapedPaddedPermutation};
-  const paddedXT = transpose2({inputs: transposeInputs, backend: backend2, attrs: transposeAttrs});
-  const resultReshapeInputs = {x: paddedXT};
-  const resultReshapeAttrs = {shape: flattenShape};
-  const result = reshape3({inputs: resultReshapeInputs, backend: backend2, attrs: resultReshapeAttrs});
+  const reshapeInputs = { x: paddedX };
+  const reshapeAttrs = { shape: reshapedPaddedShape };
+  const paddedXReshaped = reshape3({ inputs: reshapeInputs, backend: backend2, attrs: reshapeAttrs });
+  const transposeInputs = { x: paddedXReshaped };
+  const transposeAttrs = { perm: permutedReshapedPaddedPermutation };
+  const paddedXT = transpose2({ inputs: transposeInputs, backend: backend2, attrs: transposeAttrs });
+  const resultReshapeInputs = { x: paddedXT };
+  const resultReshapeAttrs = { shape: flattenShape };
+  const result = reshape3({ inputs: resultReshapeInputs, backend: backend2, attrs: resultReshapeAttrs });
   backend2.disposeIntermediateTensorInfo(paddedX);
   backend2.disposeIntermediateTensorInfo(paddedXReshaped);
   backend2.disposeIntermediateTensorInfo(paddedXT);
@@ -41024,8 +41024,8 @@ var spaceToBatchNDConfig = {
   kernelFunc: spaceToBatchND2
 };
 function sparseFillEmptyRows2(args) {
-  const {inputs, backend: backend2} = args;
-  const {indices, values, denseShape, defaultValue} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { indices, values, denseShape, defaultValue } = inputs;
   if (denseShape.shape.length !== 1) {
     throw new Error(`Dense shape must be a vector, saw:
 ${denseShape.shape}`);
@@ -41060,8 +41060,8 @@ var sparseFillEmptyRowsConfig = {
   kernelFunc: sparseFillEmptyRows2
 };
 function sparseReshape2(args) {
-  const {inputs, backend: backend2} = args;
-  const {inputIndices, inputShape, newShape} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { inputIndices, inputShape, newShape } = inputs;
   if (inputIndices.shape.length !== 2) {
     throw new Error(`Input indices should be a matrix but received shape
 ${inputIndices.shape}`);
@@ -41088,10 +41088,10 @@ var sparseReshapeConfig = {
   kernelFunc: sparseReshape2
 };
 function sparseToDense2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {sparseIndices, sparseValues, defaultValue} = inputs;
-  const {outputShape} = attrs;
-  const {sliceRank, numUpdates, sliceSize, strides, outputSize} = backend_util_exports.calculateShapes(sparseValues, sparseIndices, outputShape);
+  const { inputs, backend: backend2, attrs } = args;
+  const { sparseIndices, sparseValues, defaultValue } = inputs;
+  const { outputShape } = attrs;
+  const { sliceRank, numUpdates, sliceSize, strides, outputSize } = backend_util_exports.calculateShapes(sparseValues, sparseIndices, outputShape);
   const sumDupeIndices = false;
   const indicesBuf = backend2.bufferSync(sparseIndices);
   const updatesBuf = backend2.bufferSync(sparseValues);
@@ -41105,9 +41105,9 @@ var sparseToDenseConfig = {
   kernelFunc: sparseToDense2
 };
 function splitV(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {numOrSizeSplits, axis} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { numOrSizeSplits, axis } = attrs;
   const $axis = util_exports.parseAxisParam(axis, x.shape)[0];
   const splitSizes = backend_util_exports.prepareSplitSize(x, numOrSizeSplits, $axis);
   const begin = new Array(x.shape.length).fill(0);
@@ -41115,7 +41115,7 @@ function splitV(args) {
   return splitSizes.map((s) => {
     const sliceSize = [...size];
     sliceSize[$axis] = s;
-    const sliceT = slice2({inputs: {x}, backend: backend2, attrs: {begin, size: sliceSize}});
+    const sliceT = slice2({ inputs: { x }, backend: backend2, attrs: { begin, size: sliceSize } });
     begin[$axis] += s;
    return sliceT;
   });
@@ -41134,8 +41134,8 @@ var sqrtConfig = {
 var squareConfig = {
   kernelName: Square,
   backendName: "cpu",
-  kernelFunc: ({inputs, backend: backend2}) => {
-    const {x} = inputs;
+  kernelFunc: ({ inputs, backend: backend2 }) => {
+    const { x } = inputs;
     const cpuBackend = backend2;
     assertNotComplex(x, "square");
     const values = cpuBackend.data.get(x.dataId).values;
@@ -41145,7 +41145,7 @@ var squareConfig = {
       newValues[i] = value * value;
     }
     const dataId = cpuBackend.write(newValues, x.shape, x.dtype);
-    return {dataId, shape: x.shape, dtype: x.dtype};
+    return { dataId, shape: x.shape, dtype: x.dtype };
   }
 };
 var step2 = unaryKernelFunc(Step, (xi, attrs) => {
@@ -41162,16 +41162,16 @@ var stepConfig = {
   kernelFunc: step2
 };
 function stridedSlice2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask } = attrs;
   assertNotComplex(x, "stridedSlice");
-  const {nonStrided, $begin, $strides, size, newShape, outShape} = slice_util_exports.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask);
-  const $x = reshape3({inputs: {x}, backend: backend2, attrs: {shape: newShape}});
+  const { nonStrided, $begin, $strides, size, newShape, outShape } = slice_util_exports.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask);
+  const $x = reshape3({ inputs: { x }, backend: backend2, attrs: { shape: newShape } });
   let result;
   if (nonStrided) {
-    const sliced = slice2({inputs: {x: $x}, backend: backend2, attrs: {begin: $begin, size}});
-    result = reshape3({inputs: {x: sliced}, backend: backend2, attrs: {shape: outShape}});
+    const sliced = slice2({ inputs: { x: $x }, backend: backend2, attrs: { begin: $begin, size } });
+    result = reshape3({ inputs: { x: sliced }, backend: backend2, attrs: { shape: outShape } });
     backend2.disposeIntermediateTensorInfo(sliced);
   } else if (outShape.some((axis) => axis === 0)) {
     result = backend2.makeTensorInfo(outShape, x.dtype, []);
@@ -41180,7 +41180,7 @@ function stridedSlice2(args) {
     const outBuf = stridedSliceImpl(outShape, xBuf, $strides, $begin);
     result = backend2.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);
   }
-  const resultReshaped = reshape3({inputs: {x: result}, backend: backend2, attrs: {shape: outShape}});
+  const resultReshaped = reshape3({ inputs: { x: result }, backend: backend2, attrs: { shape: outShape } });
   backend2.disposeIntermediateTensorInfo($x);
   backend2.disposeIntermediateTensorInfo(result);
   return resultReshaped;
@@ -41203,9 +41203,9 @@ var tanhConfig = {
   kernelFunc: tanh3
 };
 function tile3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {reps} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { reps } = attrs;
   assertNotComplex(x, "tile");
   const outBuf = tileImpl(backend2.bufferSync(x), reps);
   return backend2.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);
@@ -41216,9 +41216,9 @@ var tileConfig = {
   kernelFunc: tile3
 };
 function topK(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {k, sorted} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { k, sorted } = attrs;
   assertNotComplex(x, "topk");
   const xVals = backend2.data.get(x.dataId).values;
   const [allTopKVals, allTopKIndices] = topKImpl(xVals, x.shape, x.dtype, k, sorted);
@@ -41233,9 +41233,9 @@ var topKConfig = {
   kernelFunc: topK
 };
 function transform2(args) {
-  const {inputs, attrs, backend: backend2} = args;
-  const {image: image3, transforms} = inputs;
-  const {interpolation, fillMode, fillValue, outputShape} = attrs;
+  const { inputs, attrs, backend: backend2 } = args;
+  const { image: image3, transforms } = inputs;
+  const { interpolation, fillMode, fillValue, outputShape } = attrs;
   const [batch, imageHeight, imageWidth, numChannels] = image3.shape;
   const [outHeight, outWidth] = outputShape != null ? outputShape : [imageHeight, imageWidth];
   const outShape = [batch, outHeight, outWidth, numChannels];
@@ -41279,7 +41279,7 @@ function transform2(args) {
     return backend2.makeTensorInfo(outShape, image3.dtype, outVals);
   }
   const dataId = backend2.write(outVals, outShape, image3.dtype);
-  return {dataId, shape: image3.shape, dtype: image3.dtype};
+  return { dataId, shape: image3.shape, dtype: image3.dtype };
 }
 var transformConfig = {
   kernelName: Transform,
@@ -41372,12 +41372,12 @@ function bilinearInterpolation(imageVals, imageHeight, imageWidth, batchStride,
   return (yCeil - y) * valueYFloor + (y - yFloor) * valueYCeil;
 }
 function unique3(args) {
-  const {inputs, attrs, backend: backend2} = args;
-  const {axis} = attrs;
-  const {x} = inputs;
+  const { inputs, attrs, backend: backend2 } = args;
+  const { axis } = attrs;
+  const { x } = inputs;
   assertNotComplex(x, "unique");
   const values = backend2.data.get(x.dataId).values;
-  const {outputValues, outputShape, indices} = uniqueImpl(values, axis, x.shape, x.dtype);
+  const { outputValues, outputShape, indices } = uniqueImpl(values, axis, x.shape, x.dtype);
   return [
     backend2.makeTensorInfo(outputShape, x.dtype, outputValues),
     backend2.makeTensorInfo([indices.length], "int32", indices)
@@ -41389,9 +41389,9 @@ var uniqueConfig = {
   kernelFunc: unique3
 };
 function unpack(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {value} = inputs;
-  let {axis} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { value } = inputs;
+  let { axis } = attrs;
   if (axis < 0) {
     axis += value.shape.length;
   }
@@ -41410,8 +41410,8 @@ function unpack(args) {
   const res = new Array(num);
   for (let i = 0; i < res.length; i++) {
    begin[axis] = i;
-    const tempRes = slice2({inputs: {x: value}, backend: backend2, attrs: {begin, size}});
-    res[i] = reshape3({inputs: {x: tempRes}, backend: backend2, attrs: {shape: outShape}});
+    const tempRes = slice2({ inputs: { x: value }, backend: backend2, attrs: { begin, size } });
+    res[i] = reshape3({ inputs: { x: tempRes }, backend: backend2, attrs: { shape: outShape } });
     backend2.disposeIntermediateTensorInfo(tempRes);
   }
   return res;
@@ -41422,9 +41422,9 @@ var unpackConfig = {
   kernelFunc: unpack
 };
 function unsortedSegmentSum2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x, segmentIds} = inputs;
-  const {numSegments} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x, segmentIds } = inputs;
+  const { numSegments } = attrs;
   assertNotComplex(x, "unsortedSegmentSum");
   const xRank = x.shape.length;
   const segmentIdsRank = segmentIds.shape.length;
@@ -41433,17 +41433,17 @@ function unsortedSegmentSum2(args) {
   const numIters = xRank - segmentIdsRank;
   let $segmentIds = segmentIds;
   for (let i = 0; i < numIters; ++i) {
-    const expanded = expandDims3({inputs: {input: $segmentIds}, backend: backend2, attrs: {dim: i + 1}});
+    const expanded = expandDims3({ inputs: { input: $segmentIds }, backend: backend2, attrs: { dim: i + 1 } });
     $segmentIds = expanded;
     intermediates.push(expanded);
   }
   for (let i = 0; i < numSegments; ++i) {
     const scalarValue = util_exports.createScalarValue(i, "int32");
     const segmentId = backend2.makeTensorInfo([], "int32", scalarValue);
-    const mask = equal2({inputs: {a: segmentId, b: $segmentIds}, backend: backend2});
-    const maskCasted = cast3({inputs: {x: mask}, backend: backend2, attrs: {dtype: "float32"}});
-    const mul2 = multiply2({inputs: {a: maskCasted, b: x}, backend: backend2});
-    const sumTensorInfo = sum3({inputs: {x: mul2}, backend: backend2, attrs: {axis: 0, keepDims: false}});
+    const mask = equal2({ inputs: { a: segmentId, b: $segmentIds }, backend: backend2 });
+    const maskCasted = cast3({ inputs: { x: mask }, backend: backend2, attrs: { dtype: "float32" } });
+    const mul2 = multiply2({ inputs: { a: maskCasted, b: x }, backend: backend2 });
+    const sumTensorInfo = sum3({ inputs: { x: mul2 }, backend: backend2, attrs: { axis: 0, keepDims: false } });
     res.push(sumTensorInfo);
     intermediates.push(segmentId);
     intermediates.push(mask);
@@ -41451,7 +41451,7 @@ function unsortedSegmentSum2(args) {
     intermediates.push(mul2);
     intermediates.push(sumTensorInfo);
   }
-  const result = pack({inputs: res, backend: backend2, attrs: {axis: 0}});
+  const result = pack({ inputs: res, backend: backend2, attrs: { axis: 0 } });
   intermediates.forEach((t) => backend2.disposeIntermediateTensorInfo(t));
   return result;
 }
@@ -42904,7 +42904,7 @@ var GPGPUContext = class {
     } else {
       isFencePassed = () => true;
    }
-    return {query, isFencePassed};
+    return { query, isFencePassed };
   }
   downloadMatrixFromPackedTexture(texture, physicalRows, physicalCols) {
     return this.downloadMatrixDriver(texture, () => downloadMatrixFromPackedOutputTexture(this.gl, physicalRows, physicalCols));
@@ -43082,13 +43082,13 @@ var GPGPUContext = class {
   pollItems() {
     const index = linearSearchLastTrue(this.itemsToPoll.map((x) => x.isDoneFn));
     for (let i = 0; i <= index; ++i) {
-      const {resolveFn} = this.itemsToPoll[i];
+      const { resolveFn } = this.itemsToPoll[i];
      resolveFn();
    }
     this.itemsToPoll = this.itemsToPoll.slice(index + 1);
   }
   addItemToPoll(isDoneFn, resolveFn) {
-    this.itemsToPoll.push({isDoneFn, resolveFn});
+    this.itemsToPoll.push({ isDoneFn, resolveFn });
     if (this.itemsToPoll.length > 1) {
       return;
    }
@@ -43156,7 +43156,7 @@ function linearSearchLastTrue(arr) {
   }
   return i - 1;
 }
-var {getBroadcastDims: getBroadcastDims2} = backend_util_exports;
+var { getBroadcastDims: getBroadcastDims2 } = backend_util_exports;
 function makeShader(inputsInfo, outputShape, userCode, usesPackedTextures) {
   const prefixSnippets = [];
   inputsInfo.forEach((x) => {
@@ -43771,7 +43771,7 @@ function getSampler2D(inputInfo) {
       }
     `;
   }
-  const {newShape, keptDims} = util_exports.squeezeShape(shape);
+  const { newShape, keptDims } = util_exports.squeezeShape(shape);
   const squeezedShape = newShape;
   if (squeezedShape.length < shape.length) {
     const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape);
@@ -43858,7 +43858,7 @@ function getSampler3D(inputInfo) {
   const funcName = "get" + texName.charAt(0).toUpperCase() + texName.slice(1);
   const stride0 = shape[1] * shape[2];
   const stride1 = shape[2];
-  const {newShape, keptDims} = util_exports.squeezeShape(shape);
+  const { newShape, keptDims } = util_exports.squeezeShape(shape);
   const squeezedShape = newShape;
   if (squeezedShape.length < shape.length) {
     const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape);
@@ -43950,7 +43950,7 @@ function getSampler4D(inputInfo) {
   const stride2 = shape[3];
   const stride1 = shape[2] * stride2;
   const stride0 = shape[1] * stride1;
-  const {newShape, keptDims} = util_exports.squeezeShape(shape);
+  const { newShape, keptDims } = util_exports.squeezeShape(shape);
   if (newShape.length < shape.length) {
     const newInputInfo = squeezeInputInfo(inputInfo, newShape);
     const params = ["row", "col", "depth", "depth2"];
@@ -44018,7 +44018,7 @@ function getSampler5D(inputInfo) {
   const stride2 = shape[3] * stride3;
   const stride1 = shape[2] * stride2;
   const stride0 = shape[1] * stride1;
-  const {newShape, keptDims} = util_exports.squeezeShape(shape);
+  const { newShape, keptDims } = util_exports.squeezeShape(shape);
   if (newShape.length < shape.length) {
     const newInputInfo = squeezeInputInfo(inputInfo, newShape);
     const params = ["row", "col", "depth", "depth2", "depth3"];
@@ -44085,7 +44085,7 @@ function getSampler6D(inputInfo) {
   const shape = inputInfo.shapeInfo.logicalShape;
   const texName = inputInfo.name;
   const funcName = "get" + texName.charAt(0).toUpperCase() + texName.slice(1);
-  const {newShape, keptDims} = util_exports.squeezeShape(shape);
+  const { newShape, keptDims } = util_exports.squeezeShape(shape);
   if (newShape.length < shape.length) {
     const newInputInfo = squeezeInputInfo(inputInfo, newShape);
     const params = ["row", "col", "depth", "depth2", "depth3", "depth4"];
@@ -44318,7 +44318,7 @@ function compileProgram(gpgpu, program, inputs, output) {
     if (input2.texData != null && input2.texData.slice != null && input2.texData.slice.flatOffset > 0) {
       shapeInfo.flatOffset = input2.texData.slice.flatOffset;
    }
-    return {name: program.variableNames[i], shapeInfo};
+    return { name: program.variableNames[i], shapeInfo };
   });
   const inShapeInfos = inputInfos.map((x) => x.shapeInfo);
   const outShapeInfo = {
@@ -44434,7 +44434,7 @@ function makeShaderKey(program, inputs, output) {
   key += "_" + keyInputs + "_" + keyUserCode;
   return key;
 }
-var {addImpl: addImplCPU, bincountImpl: bincountImplCPU, bincountReduceImpl: bincountReduceImplCPU, ceilImpl: ceilImplCPU, concatImpl: concatImplCPU, expImpl: expImplCPU, expm1Impl: expm1ImplCPU, floorImpl: floorImplCPU, gatherV2Impl: gatherV2ImplCPU, greaterImpl: greaterImplCPU, lessImpl: lessImplCPU, linSpaceImpl: linSpaceImplCPU, logImpl: logImplCPU, maxImpl: maxImplCPU, maximumImpl: maximumImplCPU, minimumImpl: minimumImplCPU, multiplyImpl: multiplyImplCPU, negImpl: negImplCPU, prodImpl: prodImplCPU, rangeImpl: rangeImplCPU, rsqrtImpl: rsqrtImplCPU, simpleAbsImpl: simpleAbsImplCPU, sliceImpl: sliceImplCPU, sparseFillEmptyRowsImpl: sparseFillEmptyRowsImplCPU, sparseReshapeImpl: sparseReshapeImplCPU, stridedSliceImpl: stridedSliceImplCPU, subImpl: subImplCPU, tileImpl: tileImplCPU, topKImpl: topKImplCPU, transposeImpl: transposeImplCPU, uniqueImpl: uniqueImplCPU} = shared_exports;
+var { addImpl: addImplCPU, bincountImpl: bincountImplCPU, bincountReduceImpl: bincountReduceImplCPU, ceilImpl: ceilImplCPU, concatImpl: concatImplCPU, expImpl: expImplCPU, expm1Impl: expm1ImplCPU, floorImpl: floorImplCPU, gatherV2Impl: gatherV2ImplCPU, greaterImpl: greaterImplCPU, lessImpl: lessImplCPU, linSpaceImpl: linSpaceImplCPU, logImpl: logImplCPU, maxImpl: maxImplCPU, maximumImpl: maximumImplCPU, minimumImpl: minimumImplCPU, multiplyImpl: multiplyImplCPU, negImpl: negImplCPU, prodImpl: prodImplCPU, rangeImpl: rangeImplCPU, rsqrtImpl: rsqrtImplCPU, simpleAbsImpl: simpleAbsImplCPU, sliceImpl: sliceImplCPU, sparseFillEmptyRowsImpl: sparseFillEmptyRowsImplCPU, sparseReshapeImpl: sparseReshapeImplCPU, stridedSliceImpl: stridedSliceImplCPU, subImpl: subImplCPU, tileImpl: tileImplCPU, topKImpl: topKImplCPU, transposeImpl: transposeImplCPU, uniqueImpl: uniqueImplCPU } = shared_exports;
 function getVecChannels(name, rank) {
   return ["x", "y", "z", "w", "u", "v"].slice(0, rank).map((d) => `${name}.${d}`);
 }
@@ -44962,8 +44962,8 @@ var MathBackendWebGL = class extends KernelBackend {
     if (dtype === "complex64" && values != null) {
       throw new Error(`Cannot write to a complex64 dtype.
Please use tf.complex(real, imag).`);
    }
-    const dataId = {id: this.nextDataId()};
-    this.texData.set(dataId, {shape, dtype, values, usage: TextureUsage.UPLOAD, refCount: 1});
+    const dataId = { id: this.nextDataId() };
+    this.texData.set(dataId, { shape, dtype, values, usage: TextureUsage.UPLOAD, refCount: 1 });
     return dataId;
   }
   refCount(dataId) {
@@ -44990,14 +44990,14 @@ var MathBackendWebGL = class extends KernelBackend {
     if (dtype === "complex64") {
       throw new Error(`Cannot write to a complex64 dtype.
Please use tf.complex(real, imag).`);
    }
-    this.texData.set(dataId, {shape, dtype, values, usage: TextureUsage.UPLOAD, refCount});
+    this.texData.set(dataId, { shape, dtype, values, usage: TextureUsage.UPLOAD, refCount });
   }
   disposeIntermediateTensorInfo(tensorInfo) {
     this.disposeData(tensorInfo.dataId);
   }
   readSync(dataId) {
     const texData = this.texData.get(dataId);
-    const {values, dtype, complexTensorInfos, slice: slice5, shape, isPacked} = texData;
+    const { values, dtype, complexTensorInfos, slice: slice5, shape, isPacked } = texData;
     if (slice5 != null) {
       let program;
       if (isPacked) {
@@ -45005,7 +45005,7 @@ var MathBackendWebGL = class extends KernelBackend {
      } else {
        program = new UnaryOpProgram(shape, CLONE);
      }
-      const res = this.runWebGLProgram(program, [{dataId, shape, dtype}], dtype);
+      const res = this.runWebGLProgram(program, [{ dataId, shape, dtype }], dtype);
       const data = this.readSync(res.dataId);
       this.disposeIntermediateTensorInfo(res);
       return data;
@@ -45040,7 +45040,7 @@ var MathBackendWebGL = class extends KernelBackend {
       return new Promise((resolve) => subscribers2.push(resolve));
    }
     const texData = this.texData.get(dataId);
-    const {values, shape, slice: slice5, dtype, complexTensorInfos, isPacked} = texData;
+    const { values, shape, slice: slice5, dtype, complexTensorInfos, isPacked } = texData;
     if (slice5 != null) {
       let program;
       if (isPacked) {
@@ -45048,7 +45048,7 @@ var MathBackendWebGL = class extends KernelBackend {
      } else {
        program = new UnaryOpProgram(shape, CLONE);
      }
-      const res = this.runWebGLProgram(program, [{dataId, shape, dtype}], dtype);
+      const res = this.runWebGLProgram(program, [{ dataId, shape, dtype }], dtype);
       const data = this.read(res.dataId);
       this.disposeIntermediateTensorInfo(res);
       return data;
@@ -45128,7 +45128,7 @@ var MathBackendWebGL = class extends KernelBackend {
    }
   }
   getValuesFromTexture(dataId) {
-    const {shape, dtype, isPacked} = this.texData.get(dataId);
+    const { shape, dtype, isPacked } = this.texData.get(dataId);
     const size = util_exports.sizeFromShape(shape);
     if (env().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")) {
       const tmpTarget = this.decode(dataId);
@@ -45140,7 +45140,7 @@ var MathBackendWebGL = class extends KernelBackend {
     const shouldUsePackedProgram = env().getBool("WEBGL_PACK") && isPacked === true;
     const outputShape = shouldUsePackedProgram ? getShapeAs3D(shape) : shape;
     const program = shouldUsePackedProgram ? new EncodeFloatPackedProgram(outputShape) : new EncodeFloatProgram(outputShape);
-    const output = this.runWebGLProgram(program, [{shape: outputShape, dtype, dataId}], "float32");
+    const output = this.runWebGLProgram(program, [{ shape: outputShape, dtype, dataId }], "float32");
     const tmpData = this.texData.get(output.dataId);
     const vals = this.gpgpu.downloadByteEncodedFloatMatrixFromOutputTexture(tmpData.texture, tmpData.texShape[0], tmpData.texShape[1]).subarray(0, size);
     this.disposeIntermediateTensorInfo(output);
@@ -45176,7 +45176,7 @@ var MathBackendWebGL = class extends KernelBackend {
     if (env().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE") > 0) {
       const kernelMs = await Promise.all(flattenedActiveTimerQueries);
       res["kernelMs"] = util_exports.sum(kernelMs);
-      res["getExtraProfileInfo"] = () => kernelMs.map((d, i) => ({name: flattenedActiveTimerNames[i], ms: d})).map((d) => `${d.name}: ${d.ms}`).join(", ");
+      res["getExtraProfileInfo"] = () => kernelMs.map((d, i) => ({ name: flattenedActiveTimerNames[i], ms: d })).map((d) => `${d.name}: ${d.ms}`).join(", ");
    } else {
       res["kernelMs"] = {
         error: "WebGL query timers are not supported in this environment."
@@ -45198,7 +45198,7 @@ var MathBackendWebGL = class extends KernelBackend {
     if (env().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE") > 0) {
       return this.gpgpu.beginQuery();
    }
-    return {startMs: util_exports.now(), endMs: null};
+    return { startMs: util_exports.now(), endMs: null };
   }
   endTimer(query) {
     if (env().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE") > 0) {
@@ -45236,7 +45236,7 @@ var MathBackendWebGL = class extends KernelBackend {
       return false;
    }
     this.releaseGPUData(dataId);
-    const {complexTensorInfos} = this.texData.get(dataId);
+    const { complexTensorInfos } = this.texData.get(dataId);
     if (complexTensorInfos != null) {
       this.disposeData(complexTensorInfos.real.dataId, force);
       this.disposeData(complexTensorInfos.imag.dataId, force);
@@ -45245,7 +45245,7 @@ var MathBackendWebGL = class extends KernelBackend {
     return true;
   }
   releaseGPUData(dataId) {
-    const {texture, dtype, texShape, usage, isPacked, slice: slice5} = this.texData.get(dataId);
+    const { texture, dtype, texShape, usage, isPacked, slice: slice5 } = this.texData.get(dataId);
     const key = slice5 && slice5.origDataId || dataId;
     const refCount = this.dataRefCount.get(key);
     if (refCount > 1) {
@@ -45307,10 +45307,10 @@ var MathBackendWebGL = class extends KernelBackend {
       dataId = this.write(values, shape, dtype);
    }
     this.texData.get(dataId).usage = null;
-    return {dataId, shape, dtype};
+    return { dataId, shape, dtype };
   }
   makeOutput(shape, dtype, values) {
-    const {dataId} = this.makeTensorInfo(shape, dtype, values);
+    const { dataId } = this.makeTensorInfo(shape, dtype, values);
     return engine().makeTensorFromDataId(dataId, shape, dtype, this);
   }
   unpackTensor(input2) {
@@ -45339,11 +45339,11 @@ var MathBackendWebGL = class extends KernelBackend {
     const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape);
     const preventEagerUnpackingOfOutput = true;
     const output = this.runWebGLProgram(program, [input3D], input2.dtype, null, preventEagerUnpackingOfOutput);
-    return {dataId: output.dataId, shape: afterShape, dtype: output.dtype};
+    return { dataId: output.dataId, shape: afterShape, dtype: output.dtype };
   }
   decode(dataId) {
     const texData = this.texData.get(dataId);
-    const {isPacked, shape, dtype} = texData;
+    const { isPacked, shape, dtype } = texData;
     const shapeAs3D = getShapeAs3D(shape);
     let program;
     if (isPacked) {
@@ -45352,8 +45352,8 @@ var MathBackendWebGL = class extends KernelBackend {
       program = new DecodeMatrixProgram(shapeAs3D);
    }
     const preventEagerUnpackingOfOutput = true;
-    const out = this.runWebGLProgram(program, [{shape: shapeAs3D, dtype, dataId}], dtype, null, preventEagerUnpackingOfOutput);
-    return {dtype, shape, dataId: out.dataId};
+    const out = this.runWebGLProgram(program, [{ shape: shapeAs3D, dtype, dataId }], dtype, null, preventEagerUnpackingOfOutput);
+    return { dtype, shape, dataId: out.dataId };
   }
   runWebGLProgram(program, inputs, outputDtype, customSetup, preventEagerUnpackingOfOutput = false) {
     const output = this.makeTensorInfo(program.outputShape, outputDtype);
@@ -45405,10 +45405,10 @@ var MathBackendWebGL = class extends KernelBackend {
         savedInput.shape = targetShape;
      }
       this.uploadToGPU(input2.dataId);
-      return {shape: input2.shape, texData, isUniform: false};
+      return { shape: input2.shape, texData, isUniform: false };
    });
     this.uploadToGPU(output.dataId);
-    const outputData = {shape: output.shape, texData: outData, isUniform: false};
+    const outputData = { shape: output.shape, texData: outData, isUniform: false };
     const key = makeShaderKey(program, inputsData, outputData);
     const binary = this.getAndSaveBinary(key, () => {
       return compileProgram(this.gpgpu, program, inputsData, outputData);
@@ -45422,7 +45422,7 @@ var MathBackendWebGL = class extends KernelBackend {
     dataToDispose.forEach((info) => this.disposeIntermediateTensorInfo(info));
     if (shouldTimeProgram) {
       query = this.endTimer(query);
-      this.activeTimers.push({name: program.constructor.name, query: this.getQueryTime(query)});
+      this.activeTimers.push({ name: program.constructor.name, query: this.getQueryTime(query) });
    }
     const glFlushThreshold = env().get("WEBGL_FLUSH_THRESHOLD");
     if (glFlushThreshold > 0) {
@@ -45498,7 +45498,7 @@ var MathBackendWebGL = class extends KernelBackend {
   }
   uploadToGPU(dataId) {
     const texData = this.texData.get(dataId);
-    const {shape, dtype, values, texture, usage, isPacked} = texData;
+    const { shape, dtype, values, texture, usage, isPacked } = texData;
     if (texture != null) {
       return;
    }
@@ -45550,7 +45550,7 @@ var MathBackendWebGL = class extends KernelBackend {
   }
   convertAndCacheOnCPU(dataId, float32Values) {
     const texData = this.texData.get(dataId);
-    const {dtype} = texData;
+    const { dtype } = texData;
     this.releaseGPUData(dataId);
     if (float32Values != null) {
       texData.values = float32ToTypedArray(float32Values, dtype);
@@ -45674,10 +45674,10 @@ var BinaryOpPackedProgram = class {
   }
 };
 function identity3(args) {
-  const {inputs, backend: backend2} = args;
-  const {x} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { x } = inputs;
   backend2.incRef(x.dataId);
-  return {dataId: x.dataId, shape: x.shape, dtype: x.dtype};
+  return { dataId: x.dataId, shape: x.shape, dtype: x.dtype };
 }
 var identityConfig2 = {
   kernelName: Identity,
@@ -45685,13 +45685,13 @@ var identityConfig2 = {
   kernelFunc: identity3
 };
 function complex3(args) {
-  const {inputs, backend: backend2} = args;
-  const {real: real4, imag: imag4} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { real: real4, imag: imag4 } = inputs;
   const complexInfo = backend2.makeTensorInfo(real4.shape, "complex64");
   const complex4 = backend2.texData.get(complexInfo.dataId);
-  const realTensorInfo = identity3({inputs: {x: real4}, backend: backend2});
-  const imagTensorInfo = identity3({inputs: {x: imag4}, backend: backend2});
-  complex4.complexTensorInfos = {real: realTensorInfo, imag: imagTensorInfo};
+  const realTensorInfo = identity3({ inputs: { x: real4 }, backend: backend2 });
+  const imagTensorInfo = identity3({ inputs: { x: imag4 }, backend: backend2 });
+  complex4.complexTensorInfos = { real: realTensorInfo, imag: imagTensorInfo };
   return complexInfo;
 }
 var complexConfig2 = {
@@ -45705,9 +45705,9 @@ var LEAKYRELU_PACKED = `
   return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);
 `;
 function leakyRelu3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {alpha} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { alpha } = attrs;
   const $alpha = backend2.makeTensorInfo([], "float32", util_exports.createScalarValue(alpha, "float32"));
   const program = env().getBool("WEBGL_PACK_BINARY_OPERATIONS") ? new BinaryOpPackedProgram(LEAKYRELU_PACKED, x.shape, $alpha.shape) : new BinaryOpProgram(LEAKYRELU, x.shape, $alpha.shape);
   const result = backend2.runWebGLProgram(program, [x, $alpha], x.dtype);
@@ -45725,8 +45725,8 @@ var PRELU_PACKED = `
   return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);
 `;
 function prelu4(args) {
-  const {inputs, backend: backend2} = args;
-  const {x, alpha} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { x, alpha } = inputs;
   const program = env().getBool("WEBGL_PACK_BINARY_OPERATIONS") ? new BinaryOpPackedProgram(PRELU_PACKED, x.shape, alpha.shape) : new BinaryOpProgram(PRELU, x.shape, alpha.shape);
   return backend2.runWebGLProgram(program, [x, alpha], x.dtype);
 }
@@ -45746,9 +45746,9 @@ var CHECK_NAN_SNIPPET_BINARY_PACKED = `
   result.b = isNaN.b > 0. ? NAN : result.b;
   result.a = isNaN.a > 0. ? NAN : result.a;
 `;
-function unaryKernelFunc2({opSnippet, packedOpSnippet, cpuKernelImpl, dtype}) {
-  return ({inputs, backend: backend2}) => {
-    const {x} = inputs;
+function unaryKernelFunc2({ opSnippet, packedOpSnippet, cpuKernelImpl, dtype }) {
+  return ({ inputs, backend: backend2 }) => {
+    const { x } = inputs;
     const webglBackend = backend2;
     const $dtype = dtype || x.dtype;
     if (webglBackend.shouldExecuteOnCPU([x]) && cpuKernelImpl != null) {
@@ -45766,9 +45766,9 @@ function unaryKernelFunc2({opSnippet, packedOpSnippet, cpuKernelImpl, dtype}) {
     return webglBackend.runWebGLProgram(program, [x], $dtype);
   };
 }
-function binaryKernelFunc2({opSnippet, packedOpSnippet, checkOutOfBounds = false, supportsComplex = false, cpuKernelImpl, dtype}) {
-  return ({inputs, backend: backend2}) => {
-    const {a, b} = inputs;
+function binaryKernelFunc2({ opSnippet, packedOpSnippet, checkOutOfBounds = false, supportsComplex = false, cpuKernelImpl, dtype }) {
+  return ({ inputs, backend: backend2 }) => {
+    const { a, b } = inputs;
     const webglBackend = backend2;
     if (supportsComplex && a.dtype === "complex64") {
       const aData = webglBackend.texData.get(a.dataId);
@@ -45791,7 +45791,7 @@ function binaryKernelFunc2({opSnippet, packedOpSnippet, checkOutOfBounds = false
         const program2 = new BinaryOpProgram(opSnippet, a.shape, b.shape);
         return webglBackend.runWebGLProgram(program2, [aHandle, bHandle], upcastType(aPart.dtype, bPart.dtype));
      });
-      const complexOutput = complex3({inputs: {real: real4, imag: imag4}, backend: webglBackend});
+      const complexOutput = complex3({ inputs: { real: real4, imag: imag4 }, backend: webglBackend });
       webglBackend.disposeIntermediateTensorInfo(real4);
       webglBackend.disposeIntermediateTensorInfo(imag4);
       return complexOutput;
@@ -45963,8 +45963,8 @@ var BinaryOpComplexProgram = class {
 };
 var MUL = "return a * b;";
 function multiply3(args) {
-  const {inputs, backend: backend2} = args;
-  const {a, b} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { a, b } = inputs;
   const dtype = backend_util_exports.upcastType(a.dtype, b.dtype);
   if (a.dtype === "complex64") {
     const aData = backend2.texData.get(a.dataId);
@@ -45995,7 +45995,7 @@ function multiply3(args) {
    ];
     const realPart = backend2.runWebGLProgram(realProgram, inputs2, "float32");
     const imagPart = backend2.runWebGLProgram(imagProgram, inputs2, "float32");
-    const complexOutput = complex3({inputs: {real: realPart, imag: imagPart}, backend: backend2});
+    const complexOutput = complex3({ inputs: { real: realPart, imag: imagPart }, backend: backend2 });
     backend2.disposeIntermediateTensorInfo(realPart);
     backend2.disposeIntermediateTensorInfo(imagPart);
     return complexOutput;
@@ -46039,12 +46039,12 @@ function packedReshape(input2, afterShape, backend2) {
   const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape);
   const preventEagerUnpackingOfOutput = true;
   const output = backend2.runWebGLProgram(program, [input3D], input2.dtype, null, preventEagerUnpackingOfOutput);
-  return {dataId: output.dataId, shape: afterShape, dtype: output.dtype};
+  return { dataId: output.dataId, shape: afterShape, dtype: output.dtype };
 }
 function reshape4(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {shape} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { shape } = attrs;
   const webglBackend = backend2;
   const xSize = util_exports.sizeFromShape(x.shape);
   const $shape = util_exports.inferFromImplicitShape(shape, xSize);
@@ -46055,7 +46055,7 @@ function reshape4(args) {
     return packedReshape(x, $shape, webglBackend);
   }
   webglBackend.incRef(x.dataId);
-  return {dataId: x.dataId, shape: $shape, dtype: x.dtype};
+  return { dataId: x.dataId, shape: $shape, dtype: x.dtype };
 }
 var reshapeConfig2 = {
   kernelName: Reshape,
@@ -46065,7 +46065,7 @@ var reshapeConfig2 = {
 var MeanProgram = class {
   constructor(reduceInfo, divisor) {
     this.variableNames = ["x"];
-    const {windowSize, batchSize, inSize, outSize} = reduceInfo;
+    const { windowSize, batchSize, inSize, outSize } = reduceInfo;
     this.outputShape = [batchSize, outSize];
     const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4;
     const windowSizeVec4Remainder = windowSize % 4;
@@ -46137,7 +46137,7 @@ var MeanProgram = class {
 var ReduceProgram = class {
   constructor(reduceInfo, reduceType) {
     this.variableNames = ["x"];
-    const {windowSize, batchSize, inSize, outSize} = reduceInfo;
+    const { windowSize, batchSize, inSize, outSize } = reduceInfo;
     this.outputShape = [batchSize, outSize];
     let initializationValue = "0.0";
     let compareOp = ``;
@@ -46282,13 +46282,13 @@ function reduce(x, dtype, reductionType, backend2) {
   const reductionStages = getReductionStages(x.shape);
   let result = x;
   for (let i = 0; i < reductionStages.length; i++) {
-    const {inSize, windowSize, outSize} = reductionStages[i];
+    const { inSize, windowSize, outSize } = reductionStages[i];
     let program;
     let previousResult;
     if (reductionType === "mean") {
-      program = i === 0 ? new MeanProgram({windowSize, inSize, batchSize: x.shape[0], outSize}, inSize) : new MeanProgram({windowSize, inSize, batchSize: x.shape[0], outSize});
+      program = i === 0 ? new MeanProgram({ windowSize, inSize, batchSize: x.shape[0], outSize }, inSize) : new MeanProgram({ windowSize, inSize, batchSize: x.shape[0], outSize });
    } else {
-      program = new ReduceProgram({windowSize, inSize, batchSize: x.shape[0], outSize}, reductionType);
+      program = new ReduceProgram({ windowSize, inSize, batchSize: x.shape[0], outSize }, reductionType);
    }
     previousResult = result;
     result = backend2.runWebGLProgram(program, [result], dtype);
@@ -46397,10 +46397,10 @@ function sumImpl(x, axis, keepDims, backend2) {
   const inSize = util_exports.sizeFromShape(reduceShape);
   const xSize = util_exports.sizeFromShape(x.shape);
   const batchSize = xSize / inSize;
-  const reshapedInput = reshape4({inputs: {x: sumInput}, attrs: {shape: [batchSize, inSize]}, backend: backend2});
+  const reshapedInput = reshape4({ inputs: { x: sumInput }, attrs: { shape: [batchSize, inSize] }, backend: backend2 });
   const outType = sumOutType(x.dtype);
   const reduced = reduce(reshapedInput, outType, "sum", backend2);
-  const out = reshape4({inputs: {x: reduced}, attrs: {shape: outShape}, backend: backend2});
+  const out = reshape4({ inputs: { x: reduced }, attrs: { shape: outShape }, backend: backend2 });
   backend2.disposeIntermediateTensorInfo(reshapedInput);
   backend2.disposeIntermediateTensorInfo(reduced);
   if (sumInputIsTransposed) {
@@ -46409,9 +46409,9 @@ function sumImpl(x, axis, keepDims, backend2) {
   return out;
 }
 function sum4(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {axis, keepDims} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { axis, keepDims } = attrs;
   return sumImpl(x, axis, keepDims, backend2);
 }
 var sumConfig2 = {
@@ -46420,9 +46420,9 @@ var sumConfig2 = {
   kernelFunc: sum4
 };
 function transpose3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {perm} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { perm } = attrs;
   const webglBackend = backend2;
   const xRank = x.shape.length;
   const newShape = new Array(xRank);
@@ -46448,7 +46448,7 @@ var transposeConfig2 = {
   kernelFunc: transpose3
 };
 var MATMUL_SHARED_DIM_THRESHOLD = 1e3;
-function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null}) {
+function batchMatMulImpl({ a, b, transposeA, transposeB, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null }) {
   const aRank = a.shape.length;
   const bRank = b.shape.length;
   const innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1];
@@ -46466,8 +46466,8 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
   util_exports.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (${innerShapeB}) of Tensors with shapes ${a.shape} and ${b.shape} and transposeA=${transposeA} and transposeB=${transposeB} must match.`);
   const a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] : [batchDimA, outerShapeA, innerShapeA];
   const b3dShape = transposeB ? [batchDimB, outerShapeB, innerShapeB] : [batchDimB, innerShapeB, outerShapeB];
-  const a3d = reshape4({inputs: {x: a}, backend: backend2, attrs: {shape: a3dShape}});
-  const b3d = reshape4({inputs: {x: b}, backend: backend2, attrs: {shape: b3dShape}});
+  const a3d = reshape4({ inputs: { x: a }, backend: backend2, attrs: { shape: a3dShape } });
+  const b3d = reshape4({ inputs: { x: b }, backend: backend2, attrs: { shape: b3dShape } });
   const intermediates = [a3d, b3d];
   const batchDim = Math.max(batchDimA, batchDimB);
   const sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2];
@@ -46481,11 +46481,11 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
     let aVec = a3d;
     let bVec = b3d;
     if (transposeA) {
-      aVec = transpose3({inputs: {x: a3d}, backend: backend2, attrs: {perm: [0, 2, 1]}});
+      aVec = transpose3({ inputs: { x: a3d }, backend: backend2, attrs: { perm: [0, 2, 1] } });
       intermediates.push(aVec);
    }
     if (transposeB) {
-      bVec = transpose3({inputs: {x: b3d}, backend: backend2, attrs: {perm: [0, 2, 1]}});
+      bVec = transpose3({ inputs: { x: b3d }, backend: backend2, attrs: { perm: [0, 2, 1] } });
       intermediates.push(bVec);
    }
     const shouldReshapeA = outerShapeB !== 1;
@@ -46493,9 +46493,9 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
     let aVec3d = aVec;
     if (shouldReshapeA) {
       aVec3d = reshape4({
-        inputs: {x: aVec},
+        inputs: { x: aVec },
         backend: backend2,
-        attrs: {shape: [batchDim, sharedDim, 1]}
+        attrs: { shape: [batchDim, sharedDim, 1] }
      });
       intermediates.push(aVec3d);
    }
@@ -46503,14 +46503,14 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
     let bVec3d = bVec;
     if (shouldReshapeB) {
       bVec3d = reshape4({
-        inputs: {x: bVec},
+        inputs: { x: bVec },
         backend: backend2,
-        attrs: {shape: [batchDim, 1, sharedDim]}
+        attrs: { shape: [batchDim, 1, sharedDim] }
      });
       intermediates.push(bVec3d);
    }
-    const product = multiply3({inputs: {a: aVec3d, b: bVec3d}, backend: backend2});
-    out = sum4({inputs: {x: product}, backend: backend2, attrs: {axis, keepDims: true}});
+    const product = multiply3({ inputs: { a: aVec3d, b: bVec3d }, backend: backend2 });
+    out = sum4({ inputs: { x: product }, backend: backend2, attrs: { axis, keepDims: true } });
     intermediates.push(product);
   } else {
     const dtype = upcastType(a.dtype, b.dtype);
@@ -46529,7 +46529,7 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
    }
     out = backend2.runWebGLProgram(program, inputs, dtype);
   }
-  const outReshaped = reshape4({inputs: {x: out}, backend: backend2, attrs: {shape: outShape}});
+  const outReshaped = reshape4({ inputs: { x: out }, backend: backend2, attrs: { shape: outShape } });
   intermediates.push(out);
   for (const i of intermediates) {
     backend2.disposeIntermediateTensorInfo(i);
@@ -46537,9 +46537,9 @@ function batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2, bias
   return outReshaped;
 }
 function _fusedMatMul2(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {a, b, bias, preluActivationWeights} = inputs;
-  const {transposeA, transposeB, activation: activation2, leakyreluAlpha} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { a, b, bias, preluActivationWeights } = inputs;
+  const { transposeA, transposeB, activation: activation2, leakyreluAlpha } = attrs;
   return batchMatMulImpl({
     a,
     b,
@@ -46559,8 +46559,8 @@ var _fusedMatMulConfig2 = {
 };
 var ABS2 = `return abs(x);`;
 function abs3(args) {
-  const {inputs, backend: backend2} = args;
-  const {x} = inputs;
+  const { inputs, backend: backend2 } = args;
+  const { x } = inputs;
   if (backend2.shouldExecuteOnCPU([x]) && x.dtype !== "complex64") {
     const xData = backend2.texData.get(x.dataId);
     const outValues = simpleAbsImplCPU(xData.values);
@@ -46585,7 +46585,7 @@ var ACOS = CHECK_NAN_SNIPPET + `
   }
   return acos(x);
 `;
-var acos3 = unaryKernelFunc2({opSnippet: ACOS});
+var acos3 = unaryKernelFunc2({ opSnippet: ACOS });
var acosConfig2 = {
   kernelName: Acos,
   backendName: "webgl",
@@ -46594,7 +46594,7 @@ var acosConfig2 = {
 var ACOSH = CHECK_NAN_SNIPPET + `
   if (x < 1.0) return NAN;
  return log(x + sqrt(x * x - 1.0));`;
-var acosh3 = unaryKernelFunc2({opSnippet: ACOSH});
+var acosh3 = unaryKernelFunc2({ opSnippet: ACOSH });
 var acoshConfig2 = {
   kernelName: Acosh,
   backendName: "webgl",
@@ -46659,16 +46659,16 @@ var AddNPackedProgram = class {
   }
 };
 function addN3(args) {
-  const {inputs, backend: backend2} = args;
+  const { inputs, backend: backend2 } = args;
   const tensors = inputs;
   if (tensors.length === 1) {
-    return identity3({inputs: {x: tensors[0]}, backend: backend2});
+    return identity3({ inputs: { x: tensors[0] }, backend: backend2 });
   }
   if (tensors.length > env().get("WEBGL_MAX_TEXTURES_IN_SHADER")) {
     const midIndex = Math.floor(tensors.length / 2);
-    const leftSide = addN3({inputs: tensors.slice(0, midIndex), backend: backend2});
-    const rightSide = addN3({inputs: tensors.slice(midIndex), backend: backend2});
-    return addN3({inputs: [leftSide, rightSide], backend: backend2});
+    const leftSide = addN3({ inputs: tensors.slice(0, midIndex), backend: backend2 });
+    const rightSide = addN3({ inputs: tensors.slice(midIndex), backend: backend2 });
+    return addN3({ inputs: [leftSide, rightSide], backend: backend2 });
   }
   const dtype = tensors.map((t) => t.dtype).reduce((d1, d2) => upcastType(d1, d2));
   const shapes = tensors.map((t) => t.shape);
@@ -46682,29 +46682,29 @@ var addNConfig2 = {
   kernelFunc: addN3
 };
 function all3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {axis, keepDims} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { axis, keepDims } = attrs;
   const xRank = x.shape.length;
   const origAxes = util_exports.parseAxisParam(axis, x.shape);
   let axes = origAxes;
   const permutedAxes = backend_util_exports.getAxesPermutation(axes, xRank);
   let permutedX = x;
   if (permutedAxes != null) {
-    permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}});
+    permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } });
     axes = backend_util_exports.getInnerMostAxes(axes.length, xRank);
   }
   backend_util_exports.assertAxesAreInnerMostDims("all", axes, xRank);
   const [outShape, reduceShape] = backend_util_exports.computeOutAndReduceShapes(permutedX.shape, axes);
   const inSize = util_exports.sizeFromShape(reduceShape);
-  const a2D = reshape4({inputs: {x: permutedX}, backend: backend2, attrs: {shape: [-1, inSize]}});
+  const a2D = reshape4({ inputs: { x: permutedX }, backend: backend2, attrs: { shape: [-1, inSize] } });
   const reduced = reduce(a2D, a2D.dtype, "all", backend2);
   let res;
   if (keepDims) {
     const newShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes);
-    res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: newShape}});
+    res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: newShape } });
   } else {
-    res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: outShape}});
+    res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: outShape } });
   }
   backend2.disposeIntermediateTensorInfo(a2D);
   backend2.disposeIntermediateTensorInfo(reduced);
@@ -46719,29 +46719,29 @@ var allConfig2 = {
   kernelFunc: all3
 };
 function any3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {axis, keepDims} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { axis, keepDims } = attrs;
   const xRank = x.shape.length;
   const origAxes = util_exports.parseAxisParam(axis, x.shape);
   let axes = origAxes;
   const permutedAxes = backend_util_exports.getAxesPermutation(axes, xRank);
   let permutedX = x;
   if (permutedAxes != null) {
-    permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}});
+    permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } });
     axes = backend_util_exports.getInnerMostAxes(axes.length, xRank);
   }
   backend_util_exports.assertAxesAreInnerMostDims("any", axes, xRank);
   const [outShape, reduceShape] = backend_util_exports.computeOutAndReduceShapes(permutedX.shape, axes);
   const inSize = util_exports.sizeFromShape(reduceShape);
-  const a2D = reshape4({inputs: {x: permutedX}, backend: backend2, attrs: {shape: [-1, inSize]}});
+  const a2D = reshape4({ inputs: { x: permutedX }, backend: backend2, attrs: { shape: [-1, inSize] } });
   const reduced = reduce(a2D, a2D.dtype, "any", backend2);
   let res;
   if (keepDims) {
     const newShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes);
-    res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: newShape}});
+    res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: newShape } });
   } else {
-    res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: outShape}});
+    res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: outShape } });
   }
   backend2.disposeIntermediateTensorInfo(a2D);
   backend2.disposeIntermediateTensorInfo(reduced);
@@ -46758,7 +46758,7 @@ var anyConfig2 = {
 var ArgMinMaxProgram = class {
   constructor(reduceInfo, op2, firstPass) {
     this.variableNames = ["A"];
-    const {windowSize, batchSize, outSize} = reduceInfo;
+    const { windowSize, batchSize, outSize } = reduceInfo;
     if (!firstPass) {
       this.variableNames.push("bestIndicesA");
    }
@@ -46901,7 +46901,7 @@ function argReduce(backend2, x, reduceType, bestIndicesA = null) {
     inSize = bestIndicesA.shape[1];
   }
   const windowSize = backend_util_exports.computeOptimalWindowSize(inSize);
-  const reduceInfo = {windowSize, inSize, batchSize, outSize: Math.ceil(inSize / windowSize)};
+  const reduceInfo = { windowSize, inSize, batchSize, outSize: Math.ceil(inSize / windowSize) };
   const program = new ArgMinMaxProgram(reduceInfo, reduceType, bestIndicesA == null);
   const inputs = [x];
   if (bestIndicesA != null) {
@@ -46936,26 +46936,26 @@ function argMinMaxReduce(backend2, x, axis, reduceType) {
     const intermediateTensorInfos = [];
     const [outShape, reduceShape] = backend_util_exports.computeOutAndReduceShapes(x.shape, axes);
     const inSize = util_exports.sizeFromShape(reduceShape);
-    const a2D = reshape4({inputs: {x}, backend: backend2, attrs: {shape: [-1, inSize]}});
+    const a2D = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: [-1, inSize] } });
     intermediateTensorInfos.push(a2D);
     const reduced = argReduce(backend2, a2D, reduceType);
     intermediateTensorInfos.push(reduced);
-    const reshaped = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: outShape}});
+    const reshaped = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: outShape } });
     intermediateTensorInfos.forEach((t) => backend2.disposeIntermediateTensorInfo(t));
     return reshaped;
   }
   return argReducePacked(backend2, x, reduceType);
 }
 function argMax3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {axis} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { axis } = attrs;
   let axes = util_exports.parseAxisParam(axis, x.shape);
   const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length);
   let $x = x;
   const intermediateTensorInfos = [];
   if (permutedAxes != null) {
-    $x = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}});
+    $x = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } });
     intermediateTensorInfos.push($x);
     axes = backend_util_exports.getInnerMostAxes(axes.length, $x.shape.length);
   }
@@ -46970,15 +46970,15 @@ var argMaxConfig2 = {
   kernelFunc: argMax3
 };
 function argMin3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
-  const {axis} = attrs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
+  const { axis } = attrs;
   let axes = util_exports.parseAxisParam(axis, x.shape);
   const permutedAxes = backend_util_exports.getAxesPermutation(axes, x.shape.length);
   let $x = x;
   const intermediateTensorInfos = [];
   if (permutedAxes != null) {
-    $x = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}});
+    $x = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } });
     intermediateTensorInfos.push($x);
     axes = backend_util_exports.getInnerMostAxes(axes.length, $x.shape.length);
   }
@@ -46998,14 +46998,14 @@ var ASIN = CHECK_NAN_SNIPPET + `
   }
   return asin(x);
 `;
-var asin3 = unaryKernelFunc2({opSnippet: ASIN});
+var asin3 = unaryKernelFunc2({ opSnippet: ASIN });
 var asinConfig2 = {
   kernelName: Asin,
   backendName: "webgl",
   kernelFunc: asin3
 };
 var ASINH = CHECK_NAN_SNIPPET + `return log(x + sqrt(x * x + 1.0));`;
-var asinh3 = unaryKernelFunc2({opSnippet: ASINH});
+var asinh3 = unaryKernelFunc2({ opSnippet: ASINH });
 var asinhConfig2 = {
   kernelName: Asinh,
   backendName: "webgl",
@@ -47014,7 +47014,7 @@ var asinhConfig2 = {
 var ATAN = CHECK_NAN_SNIPPET + `
   return atan(x);
 `;
-var atan4 = unaryKernelFunc2({opSnippet: ATAN});
+var atan4 = unaryKernelFunc2({ opSnippet: ATAN });
 var atanConfig2 = {
   kernelName: Atan,
   backendName: "webgl",
@@ -47029,7 +47029,7 @@ var ATAN2_PACKED = `
 ` + CHECK_NAN_SNIPPET_BINARY_PACKED + `
   return result;
 `;
-var atan23 = binaryKernelFunc2({opSnippet: ATAN2, packedOpSnippet: ATAN2_PACKED});
+var atan23 = binaryKernelFunc2({ opSnippet: ATAN2, packedOpSnippet: ATAN2_PACKED });
 var atan2Config2 = {
   kernelName: Atan2,
   backendName: "webgl",
@@ -47038,7 +47038,7 @@ var atan2Config2 = {
 var ATANH = CHECK_NAN_SNIPPET + `
   if ((x < -1.0) || (x > 1.0)) return NAN;
  return (log(1.0 + x) - log(1.0 - x)) / 2.0;`;
-var atanh3 = unaryKernelFunc2({opSnippet: ATANH});
+var atanh3 = unaryKernelFunc2({ opSnippet: ATANH });
 var atanhConfig2 = {
   kernelName: Atanh,
   backendName: "webgl",
@@ -47429,15 +47429,15 @@ var Pool3DProgram = class {
   }
 };
 function avgPool3(args) {
-  const {inputs, backend: backend2, attrs} = args;
-  const {x} = inputs;
+  const { inputs, backend: backend2, attrs } = args;
+  const { x } = inputs;
   assertNotComplex2(x, "avgPool");
-  const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs;
+  const { filterSize, strides, pad:
pad3, dimRoundingMode } = attrs; const dilations = 1; util_exports.assert(backend_util_exports.eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in avgPool: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && util_exports.arraysEqual(convInfo.inShape, convInfo.outShape)) { - return identity3({inputs: {x}, backend: backend2}); + return identity3({ inputs: { x }, backend: backend2 }); } const avgPoolProgram = new Pool2DProgram(convInfo, "avg", false); return backend2.runWebGLProgram(avgPoolProgram, [x], "float32"); @@ -47448,9 +47448,9 @@ var avgPoolConfig2 = { kernelFunc: avgPool3 }; function avgPool3D2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {filterSize, strides, pad: pad3, dimRoundingMode, dataFormat} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad: pad3, dimRoundingMode, dataFormat } = attrs; const dilations = [1, 1, 1]; const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode, dataFormat); const avgPoolProgram = new Pool3DProgram(convInfo, "avg", false); @@ -47601,10 +47601,10 @@ var AvgPool3DBackpropProgram = class { } }; function avgPool3DGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; const x = input2; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const dilations = [1, 1, 1]; const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); const avgPoolBackpropProgram = new AvgPool3DBackpropProgram(convInfo); @@ -47616,11 +47616,11 @@ var avgPoolGrad3DConfig = { kernelFunc: avgPool3DGrad2 }; function avgPoolGrad3(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; const x = input2; assertNotComplex2([dy, input2], "avgPoolGrad"); - const {filterSize, strides, pad: pad3} = attrs; + const { filterSize, strides, pad: pad3 } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3); const avgPoolBackpropProgram = new AvgPool2DBackpropProgram(convInfo); return backend2.runWebGLProgram(avgPoolBackpropProgram, [dy], x.dtype); @@ -47631,10 +47631,10 @@ var avgPoolGradConfig3 = { kernelFunc: avgPoolGrad3 }; function batchMatMul2(args) { - const {inputs, backend: backend2, attrs} = args; - const {a, b} = inputs; - const {transposeA, transposeB} = attrs; - return batchMatMulImpl({a, b, transposeA, transposeB, backend: backend2}); + const { inputs, backend: backend2, attrs } = args; + const { a, b } = inputs; + const { transposeA, transposeB } = attrs; + return batchMatMulImpl({ a, b, transposeA, transposeB, backend: backend2 }); } var batchMatMulConfig2 = { kernelName: BatchMatMul, @@ -47709,12 +47709,12 @@ var BatchNormPackedProgram = class { `; } }; -var batchNorm3 = ({inputs, backend: backend2, attrs}) => { - const {x, mean: mean4, variance, offset, scale: scale22} = inputs; +var batchNorm3 = ({ inputs, 
backend: backend2, attrs }) => { + const { x, mean: mean4, variance, offset, scale: scale22 } = inputs; util_exports.assert(mean4.shape.length === variance.shape.length, () => "Batch normalization gradient requires mean and variance to have equal ranks."); util_exports.assert(offset == null || mean4.shape.length === offset.shape.length, () => "Batch normalization gradient requires mean and offset to have equal ranks."); util_exports.assert(scale22 == null || mean4.shape.length === scale22.shape.length, () => "Batch normalization gradient requires mean and scale to have equal ranks."); - let {varianceEpsilon} = attrs; + let { varianceEpsilon } = attrs; if (varianceEpsilon == null) { varianceEpsilon = 1e-3; } @@ -47870,9 +47870,9 @@ function shallowSlice(x, begin, size, backend2) { return t; } function slice3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {begin, size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { begin, size } = attrs; const [$begin, $size] = slice_util_exports.parseSliceParams(x, begin, size); slice_util_exports.assertParamsValid(x, $begin, $size); if (util_exports.sizeFromShape($size) === 0) { @@ -47883,7 +47883,7 @@ function slice3(args) { const outValues = sliceImplCPU(xTexData.values, $begin, $size, x.shape, x.dtype); return backend2.makeTensorInfo($size, x.dtype, outValues); } - const {isPacked} = backend2.texData.get(x.dataId); + const { isPacked } = backend2.texData.get(x.dataId); const isContinous = slice_util_exports.isSliceContinous(x.shape, $begin, $size); if (isPacked || !isContinous) { const program = env().getBool("WEBGL_PACK_ARRAY_OPERATIONS") ? new SlicePackedProgram($size) : new SliceProgram($size); @@ -47899,9 +47899,9 @@ var sliceConfig2 = { kernelFunc: slice3 }; var batchToSpaceND3 = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {blockShape, crops} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { blockShape, crops } = attrs; util_exports.assert(x.shape.length <= 4, () => "batchToSpaceND for rank > 4 with a WebGL backend not implemented yet"); const prod5 = blockShape.reduce((a, b) => a * b); const reshaped = backend_util_exports.getReshaped(x.shape, blockShape, prod5); @@ -47910,17 +47910,17 @@ var batchToSpaceND3 = (args) => { const sliceBeginCoords = backend_util_exports.getSliceBeginCoords(crops, blockShape.length); const sliceSize = backend_util_exports.getSliceSize(reshapedPermuted, crops, blockShape.length); const toDispose = []; - const reshapedIntermediate = reshape4({inputs: {x}, backend: backend2, attrs: {shape: reshaped}}); - const transposedIntermediate = transpose3({inputs: {x: reshapedIntermediate}, backend: backend2, attrs: {perm: permuted}}); + const reshapedIntermediate = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: reshaped } }); + const transposedIntermediate = transpose3({ inputs: { x: reshapedIntermediate }, backend: backend2, attrs: { perm: permuted } }); const reshapedIntermediate2 = reshape4({ - inputs: {x: transposedIntermediate}, + inputs: { x: transposedIntermediate }, backend: backend2, - attrs: {shape: reshapedPermuted} + attrs: { shape: reshapedPermuted } }); const sliced = slice3({ - inputs: {x: reshapedIntermediate2}, + inputs: { x: reshapedIntermediate2 }, backend: backend2, - attrs: {begin: sliceBeginCoords, size: sliceSize} + attrs: { begin: sliceBeginCoords, size: sliceSize } }); toDispose.push(reshapedIntermediate); 
toDispose.push(transposedIntermediate); @@ -47934,9 +47934,9 @@ var batchToSpaceNDConfig2 = { kernelFunc: batchToSpaceND3 }; function bincount3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, weights} = inputs; - const {size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, weights } = inputs; + const { size } = attrs; const xVals = backend2.readSync(x.dataId); const weightsVals = backend2.readSync(weights.dataId); const outVals = bincountImplCPU(xVals, weightsVals, weights.dtype, weights.shape, size); @@ -47948,17 +47948,17 @@ var bincountConfig2 = { kernelFunc: bincount3 }; var NOT_EQUAL = `return float(a != b);`; -var notEqual3 = binaryKernelFunc2({opSnippet: NOT_EQUAL, dtype: "bool"}); +var notEqual3 = binaryKernelFunc2({ opSnippet: NOT_EQUAL, dtype: "bool" }); var notEqualConfig2 = { kernelName: NotEqual, backendName: "webgl", kernelFunc: notEqual3 }; function real3(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const inputData = backend2.texData.get(input2.dataId); - return identity3({inputs: {x: inputData.complexTensorInfos.real}, backend: backend2}); + return identity3({ inputs: { x: inputData.complexTensorInfos.real }, backend: backend2 }); } var realConfig2 = { kernelName: Real, @@ -47969,40 +47969,40 @@ var TO_INT = `return float(int(x));`; function int(input2, backend2) { const program = new UnaryOpProgram(input2.shape, TO_INT); const output = backend2.runWebGLProgram(program, [input2], "int32"); - return {dataId: output.dataId, shape: output.shape, dtype: output.dtype}; + return { dataId: output.dataId, shape: output.shape, dtype: output.dtype }; } function cast4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {dtype} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { dtype } = attrs; if (dtype === "complex64") { if (x.dtype === "complex64") { - return identity3({inputs: {x}, backend: backend2}); + return identity3({ inputs: { x }, backend: backend2 }); } const zerosTensor = zeros(x.shape); - const floatX = cast4({inputs: {x}, backend: backend2, attrs: {dtype: "float32"}}); - const result = complex3({inputs: {real: floatX, imag: zerosTensor}, backend: backend2}); + const floatX = cast4({ inputs: { x }, backend: backend2, attrs: { dtype: "float32" } }); + const result = complex3({ inputs: { real: floatX, imag: zerosTensor }, backend: backend2 }); zerosTensor.dispose(); backend2.disposeIntermediateTensorInfo(floatX); return result; } if (x.dtype === "complex64") { - const realPart = real3({inputs: {input: x}, backend: backend2}); - const result = cast4({inputs: {x: realPart}, backend: backend2, attrs: {dtype}}); + const realPart = real3({ inputs: { input: x }, backend: backend2 }); + const result = cast4({ inputs: { x: realPart }, backend: backend2, attrs: { dtype } }); backend2.disposeIntermediateTensorInfo(realPart); return result; } if (!util_exports.hasEncodingLoss(x.dtype, dtype)) { - const result = identity3({inputs: {x}, backend: backend2}); - return {dataId: result.dataId, shape: result.shape, dtype}; + const result = identity3({ inputs: { x }, backend: backend2 }); + return { dataId: result.dataId, shape: result.shape, dtype }; } if (dtype === "int32") { return int(x, backend2); } if (dtype === "bool") { const zerosTensorInfo = backend2.makeTensorInfo([], "bool", util_exports.getTypedArrayFromDType("bool", 1)); - const 
binaryInputs = {a: x, b: zerosTensorInfo}; - const result = notEqual3({inputs: binaryInputs, backend: backend2}); + const binaryInputs = { a: x, b: zerosTensorInfo }; + const result = notEqual3({ inputs: binaryInputs, backend: backend2 }); backend2.disposeIntermediateTensorInfo(zerosTensorInfo); return result; } @@ -48014,7 +48014,7 @@ var castConfig2 = { kernelFunc: cast4 }; var CEIL = `return ceil(x);`; -var ceil3 = unaryKernelFunc2({opSnippet: CEIL, packedOpSnippet: CEIL, cpuKernelImpl: ceilImplCPU}); +var ceil3 = unaryKernelFunc2({ opSnippet: CEIL, packedOpSnippet: CEIL, cpuKernelImpl: ceilImplCPU }); var ceilConfig2 = { kernelName: Ceil, backendName: "webgl", @@ -48084,9 +48084,9 @@ var ClipPackedProgram = class { } }; function clipByValue2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {clipValueMin, clipValueMax} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { clipValueMin, clipValueMax } = attrs; let program; if (env().getBool("WEBGL_PACK_CLIP")) { program = new ClipPackedProgram(x.shape); @@ -48129,8 +48129,8 @@ function makeComplexComponentTensorInfo(complexTensor, complexPart) { }; } function complexAbs2(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; const xData = backend2.texData.get(x.dataId); const program = new ComplexAbsProgram(x.shape); const programInputs = [ @@ -48253,10 +48253,10 @@ function shiftedChannels(channels, channel, shift) { return res.join(); } function imag3(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; const inputData = backend2.texData.get(input2.dataId); - return identity3({inputs: {x: inputData.complexTensorInfos.imag}, backend: backend2}); + return identity3({ inputs: { x: inputData.complexTensorInfos.imag }, backend: backend2 }); } var imagConfig2 = { kernelName: Imag, @@ -48266,11 +48266,11 @@ var imagConfig2 = { function concatImpl2(inputs, axis, backend2) { const dtype = inputs[0].dtype; if (dtype === "complex64") { - const reals = inputs.map((t) => real3({inputs: {input: t}, backend: backend2})); - const imags = inputs.map((t) => imag3({inputs: {input: t}, backend: backend2})); + const reals = inputs.map((t) => real3({ inputs: { input: t }, backend: backend2 })); + const imags = inputs.map((t) => imag3({ inputs: { input: t }, backend: backend2 })); const realConcated = concatImpl2(reals, axis, backend2); const imagConcated = concatImpl2(imags, axis, backend2); - const result2 = complex3({inputs: {real: realConcated, imag: imagConcated}, backend: backend2}); + const result2 = complex3({ inputs: { real: realConcated, imag: imagConcated }, backend: backend2 }); reals.forEach((r) => backend2.disposeIntermediateTensorInfo(r)); imags.forEach((i) => backend2.disposeIntermediateTensorInfo(i)); backend2.disposeIntermediateTensorInfo(realConcated); @@ -48285,10 +48285,10 @@ function concatImpl2(inputs, axis, backend2) { const tensors2D2 = inputs.map((t) => { const innerSize = util_exports.sizeFromShape(t.shape.slice(axis)); const shape = [-1, innerSize]; - return reshape4({inputs: {x: t}, backend: backend2, attrs: {shape}}); + return reshape4({ inputs: { x: t }, backend: backend2, attrs: { shape } }); }); const inputsValShapes = tensors2D2.map((t) => { - return {vals: backend2.readSync(t.dataId), shape: t.shape}; + return { vals: 
backend2.readSync(t.dataId), shape: t.shape }; }); const outShape2 = backend_util_exports.computeOutShape(tensors2D2.map((t) => t.shape), 1); const simplyConcat = tensors2D2[0].shape[0] === 1; @@ -48311,26 +48311,26 @@ function concatImpl2(inputs, axis, backend2) { const program2 = new ConcatPackedProgram(inputs.map((t) => t.shape), axis); return backend2.runWebGLProgram(program2, inputs, dtype); } - const {tensors2D, outShape} = computeTensors2D(inputs, axis, backend2); + const { tensors2D, outShape } = computeTensors2D(inputs, axis, backend2); const program = new ConcatProgram(tensors2D.map((t) => t.shape)); const result = backend2.runWebGLProgram(program, tensors2D, dtype); tensors2D.forEach((r) => backend2.disposeIntermediateTensorInfo(r)); - const reshapedResult = reshape4({inputs: {x: result}, attrs: {shape: outShape}, backend: backend2}); + const reshapedResult = reshape4({ inputs: { x: result }, attrs: { shape: outShape }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(result); return reshapedResult; } function computeTensors2D(inputs, axis, backend2) { const outShape = backend_util_exports.computeOutShape(inputs.map((t) => t.shape), axis); const tensors2D = inputs.map((x) => reshape4({ - inputs: {x}, - attrs: {shape: [-1, util_exports.sizeFromShape(x.shape.slice(axis))]}, + inputs: { x }, + attrs: { shape: [-1, util_exports.sizeFromShape(x.shape.slice(axis))] }, backend: backend2 })); - return {tensors2D, outShape}; + return { tensors2D, outShape }; } function concat3(args) { - const {inputs, backend: backend2, attrs} = args; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { axis } = attrs; const $axis = util_exports.parseAxisParam(axis, inputs[0].shape)[0]; const outShape = backend_util_exports.computeOutShape(inputs.map((t) => t.shape), $axis); if (util_exports.sizeFromShape(outShape) === 0) { @@ -48338,7 +48338,7 @@ function concat3(args) { } const $inputs = inputs.filter((t) => util_exports.sizeFromShape(t.shape) > 0); if ($inputs.length === 1) { - return identity3({inputs: {x: $inputs[0]}, backend: backend2}); + return identity3({ inputs: { x: $inputs[0] }, backend: backend2 }); } const shapes = $inputs.map((t) => t.shape); backend_util_exports.assertParamsConsistent(shapes, $axis); @@ -48640,8 +48640,8 @@ var Im2ColPackedProgram = class { this.packedInputs = true; this.packedOutput = true; this.outputShape = outputShape; - const {filterWidth, inChannels, strideWidth, strideHeight, padInfo, outWidth, dilationWidth, dilationHeight, dataFormat} = convInfo; - const {left, top} = padInfo; + const { filterWidth, inChannels, strideWidth, strideHeight, padInfo, outWidth, dilationWidth, dilationHeight, dataFormat } = convInfo; + const { left, top } = padInfo; const itemsPerBlockRow = inChannels * filterWidth; const glsl = getGlslDifferences(); const isChannelsLast = dataFormat === "channelsLast"; @@ -48700,7 +48700,7 @@ var Im2ColPackedProgram = class { `; } }; -function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null}) { +function conv2dByMatMul({ x, filter, convInfo, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null }) { const xShape = x.shape; const xTexData = backend2.texData.get(x.dataId); const sharedMatMulDim = convInfo.inChannels; @@ -48716,14 +48716,14 @@ function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, pr if 
(batchMatMulWillBeUnpacked || !env().getBool("WEBGL_LAZILY_UNPACK") || !env().getBool("WEBGL_PACK_BINARY_OPERATIONS") || !reshapeWillBeExpensive) { const targetShape = isChannelsLast ? xShape[0] * xShape[1] * xShape[2] : xShape[0] * xShape[2] * xShape[3]; const xReshaped = reshape4({ - inputs: {x}, + inputs: { x }, backend: backend2, - attrs: {shape: [1, targetShape, convInfo.inChannels]} + attrs: { shape: [1, targetShape, convInfo.inChannels] } }); const filterReshaped = reshape4({ - inputs: {x: filter}, + inputs: { x: filter }, backend: backend2, - attrs: {shape: [1, convInfo.inChannels, convInfo.outChannels]} + attrs: { shape: [1, convInfo.inChannels, convInfo.outChannels] } }); const result = batchMatMulImpl({ a: xReshaped, @@ -48736,7 +48736,7 @@ function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, pr preluActivationWeights, leakyreluAlpha }); - out = reshape4({inputs: {x: result}, backend: backend2, attrs: {shape: convInfo.outShape}}); + out = reshape4({ inputs: { x: result }, backend: backend2, attrs: { shape: convInfo.outShape } }); intermediates.push(xReshaped); intermediates.push(filterReshaped); intermediates.push(result); @@ -48752,9 +48752,9 @@ function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, pr xTexData.shape[xTexData.shape.length - 2]++; util_exports.assert(isReshapeFree(xTexData.shape, xReshaped.shape), () => `packed reshape ${xTexData.shape} to ${xReshaped.shape} isn't free`); const filterReshaped = reshape4({ - inputs: {x: filter}, + inputs: { x: filter }, backend: backend2, - attrs: {shape: [1, convInfo.inChannels, convInfo.outChannels]} + attrs: { shape: [1, convInfo.inChannels, convInfo.outChannels] } }); intermediates.push(filterReshaped); const pointwiseConv = batchMatMulImpl({ @@ -48772,7 +48772,7 @@ function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, pr util_exports.assert(pointwiseConvTexData.isPacked, () => "batchMatMul result is expected to be packed"); xTexData.shape = originalXTexDataShape; pointwiseConvTexData.shape = convInfo.outShape; - out = identity3({inputs: {x: pointwiseConv}, backend: backend2}); + out = identity3({ inputs: { x: pointwiseConv }, backend: backend2 }); out.shape = convInfo.outShape; intermediates.push(pointwiseConv); } @@ -48781,8 +48781,8 @@ function conv2dByMatMul({x, filter, convInfo, backend: backend2, bias = null, pr } return out; } -function conv2dWithIm2Row({x, filter, convInfo, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null}) { - const {filterWidth, filterHeight, inChannels, outWidth, outHeight, dataFormat} = convInfo; +function conv2dWithIm2Row({ x, filter, convInfo, backend: backend2, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation: activation2 = null }) { + const { filterWidth, filterHeight, inChannels, outWidth, outHeight, dataFormat } = convInfo; const isChannelsLast = dataFormat === "channelsLast"; const sharedDim = filterWidth * filterHeight * inChannels; const numCols = outHeight * outWidth; @@ -48790,20 +48790,20 @@ function conv2dWithIm2Row({x, filter, convInfo, backend: backend2, bias = null, const transposeA = true; const transposeB = false; const intermediates = []; - const xSqueezed = reshape4({inputs: {x}, backend: backend2, attrs: {shape: x.shape.slice(1)}}); + const xSqueezed = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: x.shape.slice(1) } }); const w2Row = reshape4({ - inputs: {x: filter}, + inputs: { x: filter }, backend: 
backend2, - attrs: {shape: [1, sharedDim, util_exports.sizeFromShape(filter.shape) / sharedDim]} + attrs: { shape: [1, sharedDim, util_exports.sizeFromShape(filter.shape) / sharedDim] } }); intermediates.push(xSqueezed); intermediates.push(w2Row); const im2ColProgram = new Im2ColPackedProgram(x2ColShape, xSqueezed.shape, convInfo); const im2Col = backend2.runWebGLProgram(im2ColProgram, [xSqueezed], "float32"); const im2ColReshaped = reshape4({ - inputs: {x: im2Col}, + inputs: { x: im2Col }, backend: backend2, - attrs: {shape: [1, x2ColShape[0], x2ColShape[1]]} + attrs: { shape: [1, x2ColShape[0], x2ColShape[1]] } }); intermediates.push(im2Col); intermediates.push(im2ColReshaped); @@ -48826,7 +48826,7 @@ function conv2dWithIm2Row({x, filter, convInfo, backend: backend2, bias = null, } const product = backend2.runWebGLProgram(matmulProgram, inputs, "float32"); const outShape = isChannelsLast ? [1, outHeight, outWidth, convInfo.outChannels] : [1, convInfo.outChannels, outHeight, outWidth]; - const out = reshape4({inputs: {x: product}, backend: backend2, attrs: {shape: outShape}}); + const out = reshape4({ inputs: { x: product }, backend: backend2, attrs: { shape: outShape } }); intermediates.push(product); for (const i of intermediates) { backend2.disposeIntermediateTensorInfo(i); @@ -48834,21 +48834,21 @@ function conv2dWithIm2Row({x, filter, convInfo, backend: backend2, bias = null, return out; } function conv2d4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dataFormat, dilations, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dataFormat, dilations, dimRoundingMode } = attrs; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode, false, $dataFormat); let out; if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 && convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && convInfo.strideHeight === 1 && convInfo.strideWidth === 1 && (convInfo.padInfo.type === "SAME" || convInfo.padInfo.type === "VALID")) { - out = conv2dByMatMul({x, filter, convInfo, backend: backend2}); + out = conv2dByMatMul({ x, filter, convInfo, backend: backend2 }); } else if (env().getBool("WEBGL_CONV_IM2COL") && x.shape[0] === 1) { - out = conv2dWithIm2Row({x, filter, convInfo, backend: backend2}); + out = conv2dWithIm2Row({ x, filter, convInfo, backend: backend2 }); } else { const program = new Conv2DProgram(convInfo); out = backend2.runWebGLProgram(program, [x, filter], "float32"); } - const outReshaped = reshape4({inputs: {x: out}, backend: backend2, attrs: {shape: convInfo.outShape}}); + const outReshaped = reshape4({ inputs: { x: out }, backend: backend2, attrs: { shape: convInfo.outShape } }); backend2.disposeIntermediateTensorInfo(out); return outReshaped; } @@ -49110,9 +49110,9 @@ var Conv3DDerInputProgram = class { } }; function conv2DBackpropFilter3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, pad: pad3, dataFormat, dimRoundingMode, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, pad: pad3, dataFormat, dimRoundingMode, filterShape } = attrs; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = 
backend_util_exports.computeConv2DInfo(x.shape, filterShape, strides, 1, pad3, dimRoundingMode, false, $dataFormat); const program = new Conv2DDerFilterProgram(convInfo); @@ -49124,9 +49124,9 @@ var conv2DBackpropFilterConfig2 = { kernelFunc: conv2DBackpropFilter3 }; function conv2DBackpropInput3(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {inputShape, strides, pad: pad3, dataFormat, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { inputShape, strides, pad: pad3, dataFormat, dimRoundingMode } = attrs; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(inputShape, filter.shape, strides, 1, pad3, dimRoundingMode, false, $dataFormat); const program = new Conv2DDerInputProgram(convInfo); @@ -49138,9 +49138,9 @@ var conv2DBackpropInputConfig2 = { kernelFunc: conv2DBackpropInput3 }; function conv3D2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dilations} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dilations } = attrs; const convInfo = backend_util_exports.computeConv3DInfo(x.shape, filter.shape, strides, dilations, pad3); const program = new Conv3DProgram(convInfo); return backend2.runWebGLProgram(program, [x, filter], "float32"); @@ -49151,9 +49151,9 @@ var conv3DConfig2 = { kernelFunc: conv3D2 }; function conv3DBackpropFilterV22(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, pad: pad3, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, pad: pad3, filterShape } = attrs; const convInfo = backend_util_exports.computeConv3DInfo(x.shape, filterShape, strides, 1, pad3); const program = new Conv3DDerFilterProgram(convInfo); return backend2.runWebGLProgram(program, [x, dy], "float32"); @@ -49164,9 +49164,9 @@ var conv3DBackpropFilterV2Config2 = { kernelFunc: conv3DBackpropFilterV22 }; function conv3DBackpropInput2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {pad: pad3, strides, inputShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { pad: pad3, strides, inputShape } = attrs; const convInfo = backend_util_exports.computeConv3DInfo(inputShape, filter.shape, strides, 1, pad3); const program = new Conv3DDerInputProgram(convInfo); return backend2.runWebGLProgram(program, [dy, filter], "float32"); @@ -49179,7 +49179,7 @@ var conv3DBackpropInputConfig = { var COS = CHECK_NAN_SNIPPET_UNARY + ` return cos(x); `; -var cos3 = unaryKernelFunc2({opSnippet: COS}); +var cos3 = unaryKernelFunc2({ opSnippet: COS }); var cosConfig2 = { kernelName: Cos, backendName: "webgl", @@ -49189,7 +49189,7 @@ var COSH = ` float e2x = exp(-x); return (e2x + 1.0 / e2x) / 2.0; `; -var cosh3 = unaryKernelFunc2({opSnippet: COSH}); +var cosh3 = unaryKernelFunc2({ opSnippet: COSH }); var coshConfig2 = { kernelName: Cosh, backendName: "webgl", @@ -49288,9 +49288,9 @@ var CropAndResizeProgram = class { } }; var cropAndResize3 = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {image: image3, boxes, boxInd} = inputs; - const {cropSize, method, extrapolationValue} = attrs; + const { inputs, backend: backend2, attrs } = args; + 
const { image: image3, boxes, boxInd } = inputs; + const { cropSize, method, extrapolationValue } = attrs; const program = new CropAndResizeProgram(image3.shape, boxes.shape, cropSize, method, extrapolationValue); return backend2.runWebGLProgram(program, [image3, boxes, boxInd], "float32"); }; @@ -49367,21 +49367,21 @@ function getFinalCoord(rank, name) { } } function cumsum3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, exclusive, reverse: reverse5} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse: reverse5 } = attrs; const xRank = x.shape.length; const permutation = backend_util_exports.getAxesPermutation([axis], xRank); let permutedX = x; if (permutation != null) { - permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutation}}); + permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutation } }); } const permutedAxis = backend_util_exports.getInnerMostAxes(1, xRank)[0]; if (permutedAxis !== xRank - 1) { throw new Error(`WebGL cumsum shader expects an inner-most axis=${x.shape.length - 1} but got axis=${axis}`); } const size = permutedX.shape[permutedAxis]; - let result = identity3({inputs: {x: permutedX}, backend: backend2}); + let result = identity3({ inputs: { x: permutedX }, backend: backend2 }); for (let i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) { const program = new CumSumProgram(permutedX.shape, false, reverse5); const customSetup = program.getCustomSetupFunc(i); @@ -49397,7 +49397,7 @@ function cumsum3(args) { } if (permutation != null) { const reversePermutation = backend_util_exports.getUndoAxesPermutation(permutation); - const reverseTransposedResult = transpose3({inputs: {x: result}, backend: backend2, attrs: {perm: reversePermutation}}); + const reverseTransposedResult = transpose3({ inputs: { x: result }, backend: backend2, attrs: { perm: reversePermutation } }); backend2.disposeIntermediateTensorInfo(result); backend2.disposeIntermediateTensorInfo(permutedX); return reverseTransposedResult; @@ -49410,9 +49410,9 @@ var cumsumConfig2 = { kernelFunc: cumsum3 }; function denseBincount3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, weights} = inputs; - const {size, binaryOutput} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, weights } = inputs; + const { size, binaryOutput } = attrs; if (x.shape.length === 1) { const xVals = backend2.readSync(x.dataId); const weightsVals = backend2.readSync(weights.dataId); @@ -49496,9 +49496,9 @@ var DepthToSpaceProgram = class { } }; function depthToSpace3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {blockSize, dataFormat} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { blockSize, dataFormat } = attrs; util_exports.assert(blockSize > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`); const batchSize = x.shape[0]; const inputHeight = dataFormat === "NHWC" ? 
x.shape[1] : x.shape[2]; @@ -49903,9 +49903,9 @@ var DepthwiseConvPacked2DProgram = class { } }; function depthwiseConv2dNative2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dilations, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dilations, dimRoundingMode } = attrs; let $dilations = dilations; if ($dilations == null) { $dilations = [1, 1]; @@ -50032,9 +50032,9 @@ var DepthwiseConv2DDerInputProgram = class { } }; function depthwiseConv2dNativeBackpropFilter3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, dy} = inputs; - const {strides, dilations, pad: pad3, dimRoundingMode, filterShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, dy } = inputs; + const { strides, dilations, pad: pad3, dimRoundingMode, filterShape } = attrs; const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filterShape, strides, dilations, pad3, dimRoundingMode, true); const program = new DepthwiseConv2DDerFilterProgram(convInfo); return backend2.runWebGLProgram(program, [x, dy], "float32"); @@ -50045,9 +50045,9 @@ var depthwiseConv2dNativeBackpropFilterConfig2 = { kernelFunc: depthwiseConv2dNativeBackpropFilter3 }; function depthwiseConv2dNativeBackpropInput3(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, filter} = inputs; - const {strides, dilations, pad: pad3, dimRoundingMode, inputShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { dy, filter } = inputs; + const { strides, dilations, pad: pad3, dimRoundingMode, inputShape } = attrs; const convInfo = backend_util_exports.computeConv2DInfo(inputShape, filter.shape, strides, dilations, pad3, dimRoundingMode, true); const program = new DepthwiseConv2DDerInputProgram(convInfo); return backend2.runWebGLProgram(program, [dy, filter], "float32"); @@ -50071,14 +50071,14 @@ var DiagProgram = class { } }; function diag3(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; const outShape = [...x.shape, ...x.shape]; const xSize = util_exports.sizeFromShape(x.shape); - const flat = reshape4({inputs: {x}, backend: backend2, attrs: {shape: [xSize]}}); + const flat = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: [xSize] } }); const program = new DiagProgram(xSize); const res = backend2.runWebGLProgram(program, [flat], flat.dtype); - const out = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape: outShape}}); + const out = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape: outShape } }); backend2.disposeIntermediateTensorInfo(flat); backend2.disposeIntermediateTensorInfo(res); return out; @@ -50092,8 +50092,8 @@ var Dilation2DProgram = class { constructor(convInfo) { this.variableNames = ["x", "W"]; this.outputShape = convInfo.outShape; - const {inHeight, inWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth} = convInfo; - const {top: padTop, left: padLeft} = padInfo; + const { inHeight, inWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth } = convInfo; + const { top: padTop, left: padLeft } = padInfo; this.userCode = ` const ivec2 strides = ivec2(${strideHeight}, ${strideWidth}); const ivec2 pads = ivec2(${padTop}, ${padLeft}); @@ -50136,14 +50136,14 @@ var Dilation2DProgram = 
class { } }; function dilation2D(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter} = inputs; - const {strides, pad: pad3, dilations} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter } = inputs; + const { strides, pad: pad3, dilations } = attrs; const convInfo = backend_util_exports.computeDilation2DInfo(x.shape, filter.shape, strides, pad3, "NHWC", dilations); let out; const program = new Dilation2DProgram(convInfo); out = backend2.runWebGLProgram(program, [x, filter], "float32"); - const outReshaped = reshape4({inputs: {x: out}, backend: backend2, attrs: {shape: convInfo.outShape}}); + const outReshaped = reshape4({ inputs: { x: out }, backend: backend2, attrs: { shape: convInfo.outShape } }); backend2.disposeIntermediateTensorInfo(out); return outReshaped; } @@ -50153,24 +50153,24 @@ var dilation2DConfig = { kernelFunc: dilation2D }; function einsum3(args) { - const {inputs, backend: backend2, attrs} = args; - const {equation} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { equation } = attrs; const tensors = inputs; - const {allDims, summedDims, idDims} = backend_util_exports.decodeEinsumEquation(equation, tensors.length); + const { allDims, summedDims, idDims } = backend_util_exports.decodeEinsumEquation(equation, tensors.length); backend_util_exports.checkEinsumDimSizes(allDims.length, idDims, tensors); - const {path, steps} = backend_util_exports.getEinsumComputePath(summedDims, idDims); + const { path, steps } = backend_util_exports.getEinsumComputePath(summedDims, idDims); const nSteps = steps.length; let out = null; let numDimsRemaining = allDims.length; const tensorsToDispose = []; for (let i = 0; i < nSteps; ++i) { for (const idTerm of steps[i]) { - const {permutationIndices: perm, expandDims: dimsToExpand} = backend_util_exports.getEinsumPermutation(numDimsRemaining, idDims[idTerm]); + const { permutationIndices: perm, expandDims: dimsToExpand } = backend_util_exports.getEinsumPermutation(numDimsRemaining, idDims[idTerm]); let x; if (backend_util_exports.isIdentityPermutation(perm)) { x = tensors[idTerm]; } else { - x = transpose3({inputs: {x: tensors[idTerm]}, backend: backend2, attrs: {perm}}); + x = transpose3({ inputs: { x: tensors[idTerm] }, backend: backend2, attrs: { perm } }); tensorsToDispose.push(x); } const targetShape = x.shape.slice(); @@ -50178,20 +50178,20 @@ function einsum3(args) { targetShape.splice(dimsToExpand[k], 0, 1); } if (!util_exports.arraysEqual(x.shape, targetShape)) { - x = reshape4({inputs: {x}, backend: backend2, attrs: {shape: targetShape}}); + x = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: targetShape } }); tensorsToDispose.push(x); } if (out === null) { out = x; } else { - out = multiply3({inputs: {a: x, b: out}, backend: backend2}); + out = multiply3({ inputs: { a: x, b: out }, backend: backend2 }); tensorsToDispose.push(out); } } if (i < nSteps - 1) { if (path[i] >= 0) { out = sum4({ - inputs: {x: out}, + inputs: { x: out }, backend: backend2, attrs: { axis: path[i] - (allDims.length - numDimsRemaining), @@ -50227,7 +50227,7 @@ var ELU_PACKED = ` return result; `; -var elu5 = unaryKernelFunc2({opSnippet: ELU4, packedOpSnippet: ELU_PACKED}); +var elu5 = unaryKernelFunc2({ opSnippet: ELU4, packedOpSnippet: ELU_PACKED }); var eluConfig2 = { kernelName: Elu, backendName: "webgl", @@ -50239,8 +50239,8 @@ var ELU_DER_PACKED = ` return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); `; var eluGrad2 = (args) => { - const {inputs, 
backend: backend2} = args; - const {dy, y} = inputs; + const { inputs, backend: backend2 } = args; + const { dy, y } = inputs; const program = env().getBool("WEBGL_PACK_BINARY_OPERATIONS") ? new BinaryOpPackedProgram(ELU_DER_PACKED, dy.shape, y.shape) : new BinaryOpProgram(ELU_DER, dy.shape, y.shape); return backend2.runWebGLProgram(program, [dy, y], dy.dtype); }; @@ -50253,7 +50253,7 @@ var PACKED_EQUAL = ` return vec4(equal(a, b)); `; var EQUAL = `return float(a == b);`; -var equal3 = binaryKernelFunc2({opSnippet: EQUAL, packedOpSnippet: PACKED_EQUAL, dtype: "bool"}); +var equal3 = binaryKernelFunc2({ opSnippet: EQUAL, packedOpSnippet: PACKED_EQUAL, dtype: "bool" }); var equalConfig2 = { kernelName: Equal, backendName: "webgl", @@ -50275,23 +50275,23 @@ var ERF = ` float t = 1.0 / (1.0 + p * x); return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x)); `; -var erf3 = unaryKernelFunc2({opSnippet: ERF}); +var erf3 = unaryKernelFunc2({ opSnippet: ERF }); var erfConfig2 = { kernelName: Erf, backendName: "webgl", kernelFunc: erf3 }; var EXP = `return exp(x);`; -var exp3 = unaryKernelFunc2({opSnippet: EXP, packedOpSnippet: EXP, cpuKernelImpl: expImplCPU}); +var exp3 = unaryKernelFunc2({ opSnippet: EXP, packedOpSnippet: EXP, cpuKernelImpl: expImplCPU }); var expConfig2 = { kernelName: Exp, backendName: "webgl", kernelFunc: exp3 }; function expandDims4(args) { - const {inputs, attrs, backend: backend2} = args; - const {dim} = attrs; - const {input: input2} = inputs; + const { inputs, attrs, backend: backend2 } = args; + const { dim } = attrs; + const { input: input2 } = inputs; const inputRank = input2.shape.length; const newShape = input2.shape.slice(); let $dim = dim; @@ -50300,7 +50300,7 @@ function expandDims4(args) { $dim = inputRank + dim + 1; } newShape.splice($dim, 0, 1); - return reshape4({inputs: {x: input2}, backend: backend2, attrs: {shape: newShape}}); + return reshape4({ inputs: { x: input2 }, backend: backend2, attrs: { shape: newShape } }); } var expandDimsConfig2 = { kernelName: ExpandDims, @@ -50308,7 +50308,7 @@ var expandDimsConfig2 = { kernelFunc: expandDims4 }; var EXPM1 = `return exp(x) - 1.0;`; -var expm13 = unaryKernelFunc2({opSnippet: EXPM1, packedOpSnippet: EXPM1, cpuKernelImpl: expm1ImplCPU}); +var expm13 = unaryKernelFunc2({ opSnippet: EXPM1, packedOpSnippet: EXPM1, cpuKernelImpl: expm1ImplCPU }); var expm1Config2 = { kernelName: Expm1, backendName: "webgl", @@ -50370,7 +50370,7 @@ function fftImpl2(x, inverse, backend2) { const inputSize = util_exports.sizeFromShape(x.shape); const innerDimensionSize = x.shape[x.shape.length - 1]; const batch = inputSize / innerDimensionSize; - const input2D = reshape4({inputs: {x}, backend: backend2, attrs: {shape: [batch, innerDimensionSize]}}); + const input2D = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: [batch, innerDimensionSize] } }); const xShape = input2D.shape; const realProgram = new FFTProgram("real", xShape, inverse); const imagProgram = new FFTProgram("imag", xShape, inverse); @@ -50388,17 +50388,17 @@ function fftImpl2(x, inverse, backend2) { ]; const realPart = backend2.runWebGLProgram(realProgram, inputs, "float32"); const imagPart = backend2.runWebGLProgram(imagProgram, inputs, "float32"); - const complexOutput = complex3({inputs: {real: realPart, imag: imagPart}, backend: backend2}); + const complexOutput = complex3({ inputs: { real: realPart, imag: imagPart }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(realPart); 
backend2.disposeIntermediateTensorInfo(imagPart); - const complexOutputReshaped = reshape4({inputs: {x: complexOutput}, backend: backend2, attrs: {shape: x.shape}}); + const complexOutputReshaped = reshape4({ inputs: { x: complexOutput }, backend: backend2, attrs: { shape: x.shape } }); backend2.disposeIntermediateTensorInfo(input2D); backend2.disposeIntermediateTensorInfo(complexOutput); return complexOutputReshaped; } function fft3(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; return fftImpl2(input2, false, backend2); } var fftConfig2 = { @@ -50429,9 +50429,9 @@ var FillProgram = class { } }; function fill3(args) { - const {backend: backend2, attrs} = args; - const {shape, value} = attrs; - let {dtype} = attrs; + const { backend: backend2, attrs } = args; + const { shape, value } = attrs; + let { dtype } = attrs; dtype = dtype || util_exports.inferDtype(value); if (dtype === "string") { const values = util_exports.getArrayFromDType(dtype, util_exports.sizeFromShape(shape)); @@ -50474,8 +50474,8 @@ var FlipLeftRightProgram = class { var flipLeftRightConfig2 = { kernelName: FlipLeftRight, backendName: "webgl", - kernelFunc: ({inputs, backend: backend2}) => { - const {image: image3} = inputs; + kernelFunc: ({ inputs, backend: backend2 }) => { + const { image: image3 } = inputs; const webglBackend = backend2; const program = new FlipLeftRightProgram(image3.shape); const output = webglBackend.runWebGLProgram(program, [image3], image3.dtype); @@ -50483,7 +50483,7 @@ var flipLeftRightConfig2 = { } }; var FLOOR = `return floor(x);`; -var floor3 = unaryKernelFunc2({opSnippet: FLOOR, packedOpSnippet: FLOOR, cpuKernelImpl: floorImplCPU}); +var floor3 = unaryKernelFunc2({ opSnippet: FLOOR, packedOpSnippet: FLOOR, cpuKernelImpl: floorImplCPU }); var floorConfig2 = { kernelName: Floor, backendName: "webgl", @@ -50522,7 +50522,7 @@ var INT_DIV_PACKED = ` } return vec4(result); `; -var floorDiv3 = binaryKernelFunc2({opSnippet: INT_DIV, packedOpSnippet: INT_DIV_PACKED, dtype: "int32"}); +var floorDiv3 = binaryKernelFunc2({ opSnippet: INT_DIV, packedOpSnippet: INT_DIV_PACKED, dtype: "int32" }); var floorDivConfig2 = { kernelName: FloorDiv, backendName: "webgl", @@ -50611,9 +50611,9 @@ var fromPixelsConfig = { }; var fromPixels2DContext2; function fromPixels2(args) { - const {inputs, backend: backend2, attrs} = args; - let {pixels} = inputs; - const {numChannels} = attrs; + const { inputs, backend: backend2, attrs } = args; + let { pixels } = inputs; + const { numChannels } = attrs; const isVideo = typeof HTMLVideoElement !== "undefined" && pixels instanceof HTMLVideoElement; const isImage = typeof HTMLImageElement !== "undefined" && pixels instanceof HTMLImageElement; const [width, height] = isVideo ? 
[ @@ -50640,9 +50640,9 @@ function fromPixels2(args) { return res; } function fusedConv2d(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dataFormat, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode, false, $dataFormat); let out; @@ -50689,7 +50689,7 @@ function fusedConv2d(args) { } out = backend2.runWebGLProgram(program, inputs2, "float32"); } - const outReshaped = reshape4({inputs: {x: out}, backend: backend2, attrs: {shape: convInfo.outShape}}); + const outReshaped = reshape4({ inputs: { x: out }, backend: backend2, attrs: { shape: convInfo.outShape } }); intermediates.push(out); intermediates.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return outReshaped; @@ -50700,9 +50700,9 @@ var fusedConv2DConfig2 = { kernelFunc: fusedConv2d }; function fusedDepthwiseConv2D2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dilations, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; const intermediates = []; let $dilations = dilations; if ($dilations == null) { @@ -50766,20 +50766,20 @@ var GatherNDProgram = class { } }; function gatherNd2(args) { - const {inputs, backend: backend2} = args; - const {params, indices} = inputs; + const { inputs, backend: backend2 } = args; + const { params, indices } = inputs; const indicesShape = indices.shape; const sliceRank = indicesShape[indicesShape.length - 1]; const [resultShape, numSlices, sliceSize, strides] = backend_util_exports.prepareAndValidate(params, indices); - const flattenIndices = reshape4({inputs: {x: indices}, backend: backend2, attrs: {shape: [numSlices, sliceRank]}}); + const flattenIndices = reshape4({ inputs: { x: indices }, backend: backend2, attrs: { shape: [numSlices, sliceRank] } }); const flattenX = reshape4({ - inputs: {x: params}, + inputs: { x: params }, backend: backend2, - attrs: {shape: [util_exports.sizeFromShape(params.shape) / sliceSize, sliceSize]} + attrs: { shape: [util_exports.sizeFromShape(params.shape) / sliceSize, sliceSize] } }); const program = new GatherNDProgram(sliceRank, strides, [numSlices, sliceSize]); const res = backend2.runWebGLProgram(program, [flattenX, flattenIndices], flattenX.dtype); - const reshaped = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape: resultShape}}); + const reshaped = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape: resultShape } }); backend2.disposeIntermediateTensorInfo(flattenIndices); backend2.disposeIntermediateTensorInfo(flattenX); backend2.disposeIntermediateTensorInfo(res); @@ -50818,15 +50818,15 @@ function getSourceCoords2(aShape, axis) { return sourceCoords.join(); } function gatherV22(args) { - const {inputs, backend: backend2, attrs} = args; - 
const {x, indices} = inputs; - const {axis, batchDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, indices } = inputs; + const { axis, batchDims } = attrs; const parsedAxis = util_exports.parseAxisParam(axis, x.shape)[0]; const shapeInfo = backend_util_exports.segment_util.collectGatherOpShapeInfo(x, indices, parsedAxis, batchDims); const indicesSize = util_exports.sizeFromShape(indices.shape); const toDispose = []; const flattenX = reshape4({ - inputs: {x}, + inputs: { x }, backend: backend2, attrs: { shape: [ @@ -50838,9 +50838,9 @@ function gatherV22(args) { } }); const flattenIndex = reshape4({ - inputs: {x: indices}, + inputs: { x: indices }, backend: backend2, - attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]} + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] } }); toDispose.push(flattenX); toDispose.push(flattenIndex); @@ -50860,7 +50860,7 @@ function gatherV22(args) { const program = new GatherProgram(flattenX.shape, flattenOutputShape); const res = backend2.runWebGLProgram(program, [flattenX, flattenIndex], flattenX.dtype); toDispose.push(res); - const reshaped = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape: shapeInfo.outputShape}}); + const reshaped = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape: shapeInfo.outputShape } }); toDispose.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return reshaped; } @@ -50899,8 +50899,8 @@ var greaterEqualConfig2 = { kernelFunc: greaterEqual3 }; function ifft3(args) { - const {inputs, backend: backend2} = args; - const {input: input2} = inputs; + const { inputs, backend: backend2 } = args; + const { input: input2 } = inputs; return fftImpl2(input2, true, backend2); } var ifftConfig2 = { @@ -50909,21 +50909,21 @@ var ifftConfig2 = { kernelFunc: ifft3 }; var IS_FINITE = `return float(!isnan(x) && !isinf(x));`; -var isFinite4 = unaryKernelFunc2({opSnippet: IS_FINITE, dtype: "bool"}); +var isFinite4 = unaryKernelFunc2({ opSnippet: IS_FINITE, dtype: "bool" }); var isFiniteConfig2 = { kernelName: IsFinite, backendName: "webgl", kernelFunc: isFinite4 }; var IS_INF = `return float(isinf(x));`; -var isInf3 = unaryKernelFunc2({opSnippet: IS_INF, dtype: "bool"}); +var isInf3 = unaryKernelFunc2({ opSnippet: IS_INF, dtype: "bool" }); var isInfConfig2 = { kernelName: IsInf, backendName: "webgl", kernelFunc: isInf3 }; var IS_NAN = `return float(isnan(x));`; -var isNaN4 = unaryKernelFunc2({opSnippet: IS_NAN, dtype: "bool"}); +var isNaN4 = unaryKernelFunc2({ opSnippet: IS_NAN, dtype: "bool" }); var isNaNConfig2 = { kernelName: IsNan, backendName: "webgl", @@ -50948,15 +50948,15 @@ var LESS_EQUAL = `return float(a <= b);`; var LESS_EQUAL_PACKED = ` return vec4(lessThanEqual(a, b)); `; -var lessEqual3 = binaryKernelFunc2({opSnippet: LESS_EQUAL, packedOpSnippet: LESS_EQUAL_PACKED, dtype: "bool"}); +var lessEqual3 = binaryKernelFunc2({ opSnippet: LESS_EQUAL, packedOpSnippet: LESS_EQUAL_PACKED, dtype: "bool" }); var lessEqualConfig2 = { kernelName: LessEqual, backendName: "webgl", kernelFunc: lessEqual3 }; function linSpace2(args) { - const {backend: backend2, attrs} = args; - const {start, stop, num} = attrs; + const { backend: backend2, attrs } = args; + const { start, stop, num } = attrs; const outVals = linSpaceImplCPU(start, stop, num); return backend2.makeTensorInfo([outVals.length], "float32", outVals); } @@ -50977,14 +50977,14 @@ var LOG_PACKED = ` return result; `; -var log4 = unaryKernelFunc2({opSnippet: LOG, packedOpSnippet: 
LOG_PACKED, cpuKernelImpl: logImplCPU}); +var log4 = unaryKernelFunc2({ opSnippet: LOG, packedOpSnippet: LOG_PACKED, cpuKernelImpl: logImplCPU }); var logConfig2 = { kernelName: Log, backendName: "webgl", kernelFunc: log4 }; var LOG1P = `return log(1.0 + x);`; -var log1p3 = unaryKernelFunc2({opSnippet: LOG1P}); +var log1p3 = unaryKernelFunc2({ opSnippet: LOG1P }); var log1pConfig2 = { kernelName: Log1p, backendName: "webgl", @@ -51007,7 +51007,7 @@ var logicalAndConfig2 = { kernelFunc: logicalAnd3 }; var LOGICAL_NOT = `return float(!(x >= 1.0));`; -var logicalNot3 = unaryKernelFunc2({opSnippet: LOGICAL_NOT}); +var logicalNot3 = unaryKernelFunc2({ opSnippet: LOGICAL_NOT }); var logicalNotConfig2 = { kernelName: LogicalNot, backendName: "webgl", @@ -51020,7 +51020,7 @@ var LOGICAL_OR_PACKED = ` vec4(greaterThanEqual(b, vec4(1.0))), vec4(1.0)); `; -var logicalOr3 = binaryKernelFunc2({opSnippet: LOGICAL_OR, packedOpSnippet: LOGICAL_OR_PACKED, dtype: "bool"}); +var logicalOr3 = binaryKernelFunc2({ opSnippet: LOGICAL_OR, packedOpSnippet: LOGICAL_OR_PACKED, dtype: "bool" }); var logicalOrConfig2 = { kernelName: LogicalOr, backendName: "webgl", @@ -51148,9 +51148,9 @@ var LRNPackedProgram = class { } }; var lrn = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {depthRadius, bias, alpha, beta} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; const program = env().getBool("WEBGL_PACK_NORMALIZATION") ? new LRNPackedProgram(x.shape, depthRadius, bias, alpha, beta) : new LRNProgram(x.shape, depthRadius, bias, alpha, beta); return backend2.runWebGLProgram(program, [x], x.dtype); }; @@ -51228,9 +51228,9 @@ var LRNGradProgram = class { } }; var lrnGrad = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {x, y, dy} = inputs; - const {depthRadius, bias, alpha, beta} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, y, dy } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; const program = new LRNGradProgram(x.shape, depthRadius, bias, alpha, beta); return backend2.runWebGLProgram(program, [x, y, dy], x.dtype); }; @@ -51243,17 +51243,17 @@ function maxImpl2(x, reduceShape, outShape, backend2) { const inSize = util_exports.sizeFromShape(reduceShape); const xSize = util_exports.sizeFromShape(x.shape); const batchSize = xSize / inSize; - const reshapedInput = reshape4({inputs: {x}, attrs: {shape: [batchSize, inSize]}, backend: backend2}); + const reshapedInput = reshape4({ inputs: { x }, attrs: { shape: [batchSize, inSize] }, backend: backend2 }); const reduced = reduce(reshapedInput, x.dtype, "max", backend2); - const reshapedOutput = reshape4({inputs: {x: reduced}, attrs: {shape: outShape}, backend: backend2}); + const reshapedOutput = reshape4({ inputs: { x: reduced }, attrs: { shape: outShape }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(reshapedInput); backend2.disposeIntermediateTensorInfo(reduced); return reshapedOutput; } function max4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {reductionIndices, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { reductionIndices, keepDims } = attrs; const xRank = x.shape.length; const origAxes = util_exports.parseAxisParam(reductionIndices, x.shape); let axes = origAxes; @@ -51325,15 +51325,15 @@ var maximumConfig2 = { kernelFunc: maximum4 }; function 
maxPool3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; assertNotComplex2(x, "maxPool"); - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const dilations = 1; util_exports.assert(backend_util_exports.eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in maxPool: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && util_exports.arraysEqual(convInfo.inShape, convInfo.outShape)) { - return identity3({inputs: {x}, backend: backend2}); + return identity3({ inputs: { x }, backend: backend2 }); } const maxPoolProgram = new Pool2DProgram(convInfo, "max", false); return backend2.runWebGLProgram(maxPoolProgram, [x], x.dtype); @@ -51344,9 +51344,9 @@ var maxPoolConfig2 = { kernelFunc: maxPool3 }; function maxPool3d2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {filterSize, strides, pad: pad3, dataFormat, dimRoundingMode} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad: pad3, dataFormat, dimRoundingMode } = attrs; const dilations = [1, 1, 1]; const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode, dataFormat); const maxPoolProgram = new Pool3DProgram(convInfo, "max", false); @@ -51503,10 +51503,10 @@ var MaxPool3DBackpropProgram = class { } }; function maxPool3DGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2 } = inputs; const x = input2; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const dilations = [1, 1, 1]; const convInfo = backend_util_exports.computePool3DInfo(x.shape, filterSize, strides, dilations, pad3, dimRoundingMode); const maxPool3dPositionsProgram = new Pool3DProgram(convInfo, "max", true); @@ -51522,11 +51522,11 @@ var maxPoolGrad3DConfig = { kernelFunc: maxPool3DGrad2 }; function maxPoolGrad3(args) { - const {inputs, backend: backend2, attrs} = args; - const {dy, input: input2, output} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { dy, input: input2, output } = inputs; const x = input2; assertNotComplex2([input2, output], "maxPoolGrad"); - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode); const getPositions = true; const maxPoolPositionsProgram = new Pool2DProgram(convInfo, "max", getPositions); @@ -51551,9 +51551,9 @@ function maxPoolWithArgmaxImpl2(x, includeBatchInIndex, convInfo, backend2) { var maxPoolWithArgmaxConfig2 = { kernelName: MaxPoolWithArgmax, backendName: "webgl", - kernelFunc: ({inputs, attrs, backend: backend2}) => { - const {x} = inputs; - const {filterSize, strides, pad: pad3, includeBatchInIndex} = attrs; + kernelFunc: ({ inputs, attrs, backend: backend2 }) => { + const { x } = inputs; + const { filterSize, strides, 
pad: pad3, includeBatchInIndex } = attrs; const webglBackend = backend2; util_exports.assert(x.shape.length === 4, () => `Error in maxPool: input must be rank 4 but got rank ${x.shape.length}.`); const dilations = [1, 1]; @@ -51567,9 +51567,9 @@ function meanImpl(x, reduceShape, outShape, backend2) { const inSize = util_exports.sizeFromShape(reduceShape); const xSize = util_exports.sizeFromShape(x.shape); const batchSize = xSize / inSize; - const reshapedInput = reshape4({inputs: {x}, attrs: {shape: [batchSize, inSize]}, backend: backend2}); + const reshapedInput = reshape4({ inputs: { x }, attrs: { shape: [batchSize, inSize] }, backend: backend2 }); const reduced = reduce(reshapedInput, "float32", "mean", backend2); - const reshapedOutput = reshape4({inputs: {x: reduced}, attrs: {shape: outShape}, backend: backend2}); + const reshapedOutput = reshape4({ inputs: { x: reduced }, attrs: { shape: outShape }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(reshapedInput); backend2.disposeIntermediateTensorInfo(reduced); return reshapedOutput; @@ -51577,9 +51577,9 @@ function meanImpl(x, reduceShape, outShape, backend2) { var meanConfig2 = { kernelName: Mean, backendName: "webgl", - kernelFunc: ({inputs, attrs, backend: backend2}) => { - const {x} = inputs; - const {keepDims, axis} = attrs; + kernelFunc: ({ inputs, attrs, backend: backend2 }) => { + const { x } = inputs; + const { keepDims, axis } = attrs; const webglBackend = backend2; const xRank = x.shape.length; const origAxes = util_exports.parseAxisParam(axis, x.shape); @@ -51621,29 +51621,29 @@ var meanConfig2 = { } }; function min4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; const xRank = x.shape.length; const origAxes = util_exports.parseAxisParam(axis, x.shape); let axes = origAxes; const permutedAxes = backend_util_exports.getAxesPermutation(axes, xRank); let permutedX = x; if (permutedAxes != null) { - permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); axes = backend_util_exports.getInnerMostAxes(axes.length, x.shape.length); } backend_util_exports.assertAxesAreInnerMostDims("min", axes, xRank); const [outShape, reduceShape] = backend_util_exports.computeOutAndReduceShapes(permutedX.shape, axes); const inSize = util_exports.sizeFromShape(reduceShape); - const a2D = reshape4({inputs: {x: permutedX}, backend: backend2, attrs: {shape: [-1, inSize]}}); + const a2D = reshape4({ inputs: { x: permutedX }, backend: backend2, attrs: { shape: [-1, inSize] } }); const reduced = reduce(a2D, a2D.dtype, "min", backend2); let res; if (keepDims) { const newShape = backend_util_exports.expandShapeToKeepDim(outShape, origAxes); - res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: newShape}}); + res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: newShape } }); } else { - res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: outShape}}); + res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: outShape } }); } backend2.disposeIntermediateTensorInfo(a2D); backend2.disposeIntermediateTensorInfo(reduced); @@ -51804,9 +51804,9 @@ var MirrorPadPackedProgram = class { `; } }; -var mirrorPadKernelFunc = ({inputs, backend: backend2, attrs}) => { - const 
{x} = inputs; - const {paddings, mode} = attrs; +var mirrorPadKernelFunc = ({ inputs, backend: backend2, attrs }) => { + const { x } = inputs; + const { paddings, mode } = attrs; const program = env().getBool("WEBGL_PACK_ARRAY_OPERATIONS") ? new MirrorPadPackedProgram(x.shape, paddings, mode) : new MirrorPadProgram(x.shape, paddings, mode); const output = backend2.runWebGLProgram(program, [x], x.dtype); return output; @@ -51894,7 +51894,7 @@ var DIV_PACKED = ` return result; `; -var realDiv = binaryKernelFunc2({opSnippet: DIV, packedOpSnippet: DIV_PACKED, checkOutOfBounds: true}); +var realDiv = binaryKernelFunc2({ opSnippet: DIV, packedOpSnippet: DIV_PACKED, checkOutOfBounds: true }); var realDivConfig2 = { kernelName: RealDiv, backendName: "webgl", @@ -51913,22 +51913,22 @@ var subConfig2 = { kernelFunc: sub3 }; function softmax4(args) { - const {inputs, backend: backend2, attrs} = args; - const {logits} = inputs; - const {dim} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { logits } = inputs; + const { dim } = attrs; const axes = util_exports.parseAxisParam([dim], logits.shape); const maxLogit = max4({ - inputs: {x: logits}, + inputs: { x: logits }, backend: backend2, - attrs: {reductionIndices: axes, keepDims: false} + attrs: { reductionIndices: axes, keepDims: false } }); const expandedShape = backend_util_exports.expandShapeToKeepDim(maxLogit.shape, axes); - const maxLogitsReshaped = reshape4({inputs: {x: maxLogit}, backend: backend2, attrs: {shape: expandedShape}}); - const a = sub3({inputs: {a: logits, b: maxLogitsReshaped}, backend: backend2}); - const b = exp3({inputs: {x: a}, backend: backend2}); - const sumExp = sum4({inputs: {x: b}, backend: backend2, attrs: {axis: axes, keepDims: false}}); - const sumExpReshaped = reshape4({inputs: {x: sumExp}, backend: backend2, attrs: {shape: expandedShape}}); - const res = realDiv({inputs: {a: b, b: sumExpReshaped}, backend: backend2}); + const maxLogitsReshaped = reshape4({ inputs: { x: maxLogit }, backend: backend2, attrs: { shape: expandedShape } }); + const a = sub3({ inputs: { a: logits, b: maxLogitsReshaped }, backend: backend2 }); + const b = exp3({ inputs: { x: a }, backend: backend2 }); + const sumExp = sum4({ inputs: { x: b }, backend: backend2, attrs: { axis: axes, keepDims: false } }); + const sumExpReshaped = reshape4({ inputs: { x: sumExp }, backend: backend2, attrs: { shape: expandedShape } }); + const res = realDiv({ inputs: { a: b, b: sumExpReshaped }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(maxLogit); backend2.disposeIntermediateTensorInfo(maxLogitsReshaped); backend2.disposeIntermediateTensorInfo(a); @@ -51943,10 +51943,10 @@ var softmaxConfig2 = { kernelFunc: softmax4 }; function multinomial3(args) { - const {inputs, backend: backend2, attrs} = args; - const {logits} = inputs; - const {numSamples, seed, normalized} = attrs; - const probs = normalized ? logits : softmax4({inputs: {logits}, backend: backend2, attrs: {dim: logits.shape.length - 1}}); + const { inputs, backend: backend2, attrs } = args; + const { logits } = inputs; + const { numSamples, seed, normalized } = attrs; + const probs = normalized ? 
logits : softmax4({ inputs: { logits }, backend: backend2, attrs: { dim: logits.shape.length - 1 } }); const batchSize = probs.shape[0]; const numOutcomes = probs.shape[1]; const program = new MultinomialProgram(batchSize, numOutcomes, numSamples); @@ -51964,8 +51964,8 @@ var multinomialConfig2 = { }; var NEG = `return -x;`; function neg3(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; if (backend2.shouldExecuteOnCPU([x])) { const xData = backend2.texData.get(x.dataId); const [outValues, newShape] = negImplCPU(xData.values, x.shape, x.dtype); @@ -51987,12 +51987,12 @@ var negConfig2 = { var nonMaxSuppressionV3Impl3 = kernel_impls_exports.nonMaxSuppressionV3Impl; function nonMaxSuppressionV32(args) { backend_util_exports.warn("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead"); - const {inputs, backend: backend2, attrs} = args; - const {boxes, scores} = inputs; - const {maxOutputSize, iouThreshold, scoreThreshold} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold } = attrs; const boxesVals = backend2.readSync(boxes.dataId); const scoresVals = backend2.readSync(scores.dataId); - const {selectedIndices} = nonMaxSuppressionV3Impl3(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); + const { selectedIndices } = nonMaxSuppressionV3Impl3(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); return backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices)); } var nonMaxSuppressionV3Config2 = { @@ -52003,12 +52003,12 @@ var nonMaxSuppressionV3Config2 = { var nonMaxSuppressionV4Impl3 = kernel_impls_exports.nonMaxSuppressionV4Impl; function nonMaxSuppressionV42(args) { backend_util_exports.warn("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead"); - const {inputs, backend: backend2, attrs} = args; - const {boxes, scores} = inputs; - const {maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize } = attrs; const boxesVals = backend2.readSync(boxes.dataId); const scoresVals = backend2.readSync(scores.dataId); - const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl3(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); + const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl3(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); return [ backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices)), backend2.makeTensorInfo([], "int32", new Int32Array([validOutputs])) @@ -52022,16 +52022,16 @@ var nonMaxSuppressionV4Config2 = { var nonMaxSuppressionV5Impl3 = kernel_impls_exports.nonMaxSuppressionV5Impl; function nonMaxSuppressionV52(args) { backend_util_exports.warn("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead"); - const {inputs, backend: backend2, attrs} = args; - const {boxes, scores} = inputs; - const {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = attrs; const boxesVals = backend2.readSync(boxes.dataId); const scoresVals = backend2.readSync(scores.dataId); const maxOutputSizeVal = maxOutputSize; const iouThresholdVal = iouThreshold; const scoreThresholdVal = scoreThreshold; const softNmsSigmaVal = softNmsSigma; - const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl3(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal); + const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl3(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal); return [ backend2.makeTensorInfo([selectedIndices.length], "int32", new Int32Array(selectedIndices)), backend2.makeTensorInfo([selectedScores.length], "float32", new Float32Array(selectedScores)) @@ -52057,16 +52057,16 @@ var OneHotProgram = class { } }; var oneHot3 = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {indices} = inputs; - const {depth, onValue, offValue} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { indices } = inputs; + const { depth, onValue, offValue } = attrs; const indicesSize = util_exports.sizeFromShape(indices.shape); const program = new OneHotProgram(indicesSize, depth, onValue, offValue); - const reshaped = reshape4({inputs: {x: indices}, backend: backend2, attrs: {shape: [indicesSize]}}); + const reshaped = reshape4({ inputs: { x: indices }, backend: backend2, attrs: { shape: [indicesSize] } }); const result = backend2.runWebGLProgram(program, [reshaped], indices.dtype); backend2.disposeIntermediateTensorInfo(reshaped); const outShape = [...indices.shape, depth]; - const out = reshape4({inputs: {x: result}, backend: backend2, attrs: {shape: outShape}}); + const out = reshape4({ inputs: { x: result }, backend: backend2, attrs: { shape: outShape } }); backend2.disposeIntermediateTensorInfo(result); return out; }; @@ -52076,14 +52076,14 @@ var oneHotConfig2 = { kernelFunc: oneHot3 }; function zerosLike3(args) { - const {inputs, backend: backend2} = args; - const {x} = inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; if (x.dtype === "complex64") { - const realPart = real3({inputs: {input: x}, backend: backend2}); - const r = zerosLike3({inputs: {x: realPart}, backend: backend2}); - const imagPart = imag3({inputs: {input: x}, backend: backend2}); - const i = zerosLike3({inputs: {x: imagPart}, backend: backend2}); - const result = complex3({inputs: {real: r, imag: i}, backend: backend2}); + const realPart = real3({ inputs: { input: x }, backend: backend2 }); + const r = zerosLike3({ inputs: { x: realPart }, backend: backend2 }); + const imagPart = imag3({ inputs: { input: x }, backend: backend2 }); + const i = zerosLike3({ inputs: { x: imagPart }, backend: backend2 }); + const result = complex3({ inputs: { real: r, imag: i }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(realPart); backend2.disposeIntermediateTensorInfo(r); backend2.disposeIntermediateTensorInfo(imagPart); @@ -52106,23 +52106,23 @@ var zerosLikeConfig2 = { kernelFunc: zerosLike3 }; function onesLike3(args) { - const {inputs, backend: backend2} = args; - const {x} = 
inputs; + const { inputs, backend: backend2 } = args; + const { x } = inputs; if (x.dtype === "string") { throw new Error("onesLike is not supported under string dtype"); } else if (x.dtype === "complex64") { - const realPart = real3({inputs: {input: x}, backend: backend2}); - const r = onesLike3({inputs: {x: realPart}, backend: backend2}); - const imagPart = imag3({inputs: {input: x}, backend: backend2}); - const i = zerosLike3({inputs: {x: imagPart}, backend: backend2}); - const result = complex3({inputs: {real: r, imag: i}, backend: backend2}); + const realPart = real3({ inputs: { input: x }, backend: backend2 }); + const r = onesLike3({ inputs: { x: realPart }, backend: backend2 }); + const imagPart = imag3({ inputs: { input: x }, backend: backend2 }); + const i = zerosLike3({ inputs: { x: imagPart }, backend: backend2 }); + const result = complex3({ inputs: { real: r, imag: i }, backend: backend2 }); backend2.disposeIntermediateTensorInfo(realPart); backend2.disposeIntermediateTensorInfo(r); backend2.disposeIntermediateTensorInfo(imagPart); backend2.disposeIntermediateTensorInfo(i); return result; } else { - return fill3({attrs: {shape: x.shape, dtype: x.dtype, value: 1}, backend: backend2}); + return fill3({ attrs: { shape: x.shape, dtype: x.dtype, value: 1 }, backend: backend2 }); } } var onesLikeConfig2 = { @@ -52131,10 +52131,10 @@ var onesLikeConfig2 = { kernelFunc: onesLike3 }; function pack2(args) { - const {inputs, backend: backend2, attrs} = args; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { axis } = attrs; if (inputs.length === 1) { - return expandDims4({inputs: {input: inputs[0]}, backend: backend2, attrs: {dim: axis}}); + return expandDims4({ inputs: { input: inputs[0] }, backend: backend2, attrs: { dim: axis } }); } const shape = inputs[0].shape; const dtype = inputs[0].dtype; @@ -52144,11 +52144,11 @@ function pack2(args) { }); const intermediateTensorInfos = []; const expandedTensors = inputs.map((t) => { - const expandedT = expandDims4({inputs: {input: t}, backend: backend2, attrs: {dim: axis}}); + const expandedT = expandDims4({ inputs: { input: t }, backend: backend2, attrs: { dim: axis } }); intermediateTensorInfos.push(expandedT); return expandedT; }); - const result = concat3({inputs: expandedTensors, backend: backend2, attrs: {axis}}); + const result = concat3({ inputs: expandedTensors, backend: backend2, attrs: { axis } }); intermediateTensorInfos.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return result; } @@ -52271,9 +52271,9 @@ var PadPackedProgram = class { } }; var padV22 = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {paddings, constantValue} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { paddings, constantValue } = attrs; const program = env().getBool("WEBGL_PACK_ARRAY_OPERATIONS") ? 
new PadPackedProgram(x.shape, paddings, constantValue) : new PadProgram(x.shape, paddings, constantValue); const customSetup = program.getCustomSetupFunc(constantValue); return backend2.runWebGLProgram(program, [x], x.dtype, customSetup); @@ -52310,16 +52310,16 @@ var POW_PACKED = ` ` + CHECK_NAN_SNIPPET3 + ` return result; `; -var pow3 = binaryKernelFunc2({opSnippet: POW, packedOpSnippet: POW_PACKED}); +var pow3 = binaryKernelFunc2({ opSnippet: POW, packedOpSnippet: POW_PACKED }); var powConfig2 = { kernelName: Pow, backendName: "webgl", kernelFunc: pow3 }; function prod3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, keepDims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; const xRank = x.shape.length; const toDispose = []; const origAxes = util_exports.parseAxisParam(axis, x.shape); @@ -52327,7 +52327,7 @@ function prod3(args) { const permutedAxes = backend_util_exports.getAxesPermutation(axes, xRank); let permutedX = x; if (permutedAxes != null) { - permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutedAxes}}); + permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutedAxes } }); axes = backend_util_exports.getInnerMostAxes(axes.length, xRank); toDispose.push(permutedX); } @@ -52335,22 +52335,22 @@ function prod3(args) { let res; if (backend2.shouldExecuteOnCPU([permutedX])) { const xVals = backend2.texData.get(permutedX.dataId).values; - const {outVals, outShape, outDtype} = prodImplCPU(permutedX.shape, permutedX.dtype, xVals, axes); + const { outVals, outShape, outDtype } = prodImplCPU(permutedX.shape, permutedX.dtype, xVals, axes); res = backend2.makeTensorInfo(outShape, outDtype, outVals); } else { const [outShape, reduceShape] = backend_util_exports.computeOutAndReduceShapes(permutedX.shape, axes); const inSize = util_exports.sizeFromShape(reduceShape); - const a2D = reshape4({inputs: {x: permutedX}, backend: backend2, attrs: {shape: [-1, inSize]}}); + const a2D = reshape4({ inputs: { x: permutedX }, backend: backend2, attrs: { shape: [-1, inSize] } }); const outputDType = sumOutType(x.dtype); const reduced = reduce(a2D, outputDType, "prod", backend2); - res = reshape4({inputs: {x: reduced}, backend: backend2, attrs: {shape: outShape}}); + res = reshape4({ inputs: { x: reduced }, backend: backend2, attrs: { shape: outShape } }); toDispose.push(a2D); toDispose.push(reduced); } if (keepDims) { toDispose.push(res); const newShape = backend_util_exports.expandShapeToKeepDim(res.shape, origAxes); - res = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape: newShape}}); + res = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape: newShape } }); } toDispose.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return res; @@ -52361,8 +52361,8 @@ var prodConfig2 = { kernelFunc: prod3 }; var range4 = (args) => { - const {backend: backend2, attrs} = args; - const {start, stop, step: step5, dtype} = attrs; + const { backend: backend2, attrs } = args; + const { start, stop, step: step5, dtype } = attrs; const values = rangeImplCPU(start, stop, step5, dtype); return backend2.makeTensorInfo([values.length], dtype, values); }; @@ -52372,7 +52372,7 @@ var rangeConfig2 = { kernelFunc: range4 }; var RECIPROCAL = `return 1.0 / x;`; -var reciprocal3 = unaryKernelFunc2({opSnippet: RECIPROCAL}); +var reciprocal3 = unaryKernelFunc2({ opSnippet: RECIPROCAL }); var reciprocalConfig2 = { kernelName: 
Reciprocal, backendName: "webgl", @@ -52392,7 +52392,7 @@ var RELU_PACKED = ` return result; `; -var relu3 = unaryKernelFunc2({opSnippet: RELU3, packedOpSnippet: RELU_PACKED}); +var relu3 = unaryKernelFunc2({ opSnippet: RELU3, packedOpSnippet: RELU_PACKED }); var reluConfig2 = { kernelName: Relu, backendName: "webgl", @@ -52412,7 +52412,7 @@ var RELU6_PACKED = ` return result; `; -var relu63 = unaryKernelFunc2({opSnippet: RELU63, packedOpSnippet: RELU6_PACKED}); +var relu63 = unaryKernelFunc2({ opSnippet: RELU63, packedOpSnippet: RELU6_PACKED }); var relu6Config2 = { kernelName: Relu6, backendName: "webgl", @@ -52577,9 +52577,9 @@ var ResizeBilinearPackedProgram = class { } }; function resizeBilinear3(args) { - const {inputs, backend: backend2, attrs} = args; - const {images} = inputs; - const {alignCorners, halfPixelCenters, size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; const [newHeight, newWidth] = size; const program = env().getBool("WEBGL_PACK_IMAGE_OPERATIONS") ? new ResizeBilinearPackedProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters) : new ResizeBilinearProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters); return backend2.runWebGLProgram(program, [images], "float32"); @@ -52695,9 +52695,9 @@ var ResizeBilinearBackpropProgram = class { } }; function resizeBilinearGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {images, dy} = inputs; - const {alignCorners} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; const program = new ResizeBilinearBackpropProgram(dy.shape, images.shape, alignCorners); return backend2.runWebGLProgram(program, [dy], dy.dtype); } @@ -52820,9 +52820,9 @@ var ResizeNearestNeighborPackedProgram = class { } }; function resizeNearestNeighbor3(args) { - const {inputs, backend: backend2, attrs} = args; - const {images} = inputs; - const {alignCorners, halfPixelCenters, size} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; const [newHeight, newWidth] = size; const program = env().getBool("WEBGL_PACK_IMAGE_OPERATIONS") ? 
new ResizeNearestNeighborPackedProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters) : new ResizeNearestNeighborProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters); return backend2.runWebGLProgram(program, [images], images.dtype); @@ -52927,9 +52927,9 @@ var ResizeNearestNeigborBackpropProgram = class { } }; function resizeNearestNeighborGrad2(args) { - const {inputs, backend: backend2, attrs} = args; - const {images, dy} = inputs; - const {alignCorners} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; const program = new ResizeNearestNeigborBackpropProgram(dy.shape, images.shape, alignCorners); return backend2.runWebGLProgram(program, [dy], dy.dtype); } @@ -53050,13 +53050,13 @@ var ReversePackedProgram = class { } }; function reverse3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {dims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { dims } = attrs; const xRank = x.shape.length; const $dims = util_exports.parseAxisParam(dims, x.shape); if (xRank === 0) { - return identity3({inputs: {x}, backend: backend2}); + return identity3({ inputs: { x }, backend: backend2 }); } const program = env().getBool("WEBGL_PACK_ARRAY_OPERATIONS") ? new ReversePackedProgram(x.shape, $dims) : new ReverseProgram(x.shape, $dims); return backend2.runWebGLProgram(program, [x], x.dtype); @@ -53113,9 +53113,9 @@ var RotateProgram = class { var rotateWithOffsetConfig2 = { kernelName: RotateWithOffset, backendName: "webgl", - kernelFunc: ({inputs, attrs, backend: backend2}) => { - const {image: image3} = inputs; - const {radians, fillValue, center} = attrs; + kernelFunc: ({ inputs, attrs, backend: backend2 }) => { + const { image: image3 } = inputs; + const { radians, fillValue, center } = attrs; const webglBackend = backend2; const program = new RotateProgram(image3.shape, fillValue); const [centerX, centerY] = backend_util_exports.getImageCenter(center, image3.shape[1], image3.shape[2]); @@ -53140,14 +53140,14 @@ var ROUND = ` } } `; -var round4 = unaryKernelFunc2({opSnippet: ROUND}); +var round4 = unaryKernelFunc2({ opSnippet: ROUND }); var roundConfig2 = { kernelName: Round, backendName: "webgl", kernelFunc: round4 }; var RSQRT = `return inversesqrt(x);`; -var rsqrt3 = unaryKernelFunc2({opSnippet: RSQRT, cpuKernelImpl: rsqrtImplCPU}); +var rsqrt3 = unaryKernelFunc2({ opSnippet: RSQRT, cpuKernelImpl: rsqrtImplCPU }); var rsqrtConfig2 = { kernelName: Rsqrt, backendName: "webgl", @@ -53198,20 +53198,20 @@ var ScatterProgram = class { } }; function scatterNd2(args) { - const {inputs, backend: backend2, attrs} = args; - const {indices, updates} = inputs; - const {shape} = attrs; - const {sliceRank, numUpdates, sliceSize, strides, outputSize} = backend_util_exports.calculateShapes(updates, indices, shape); + const { inputs, backend: backend2, attrs } = args; + const { indices, updates } = inputs; + const { shape } = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = backend_util_exports.calculateShapes(updates, indices, shape); const flattenShape = [outputSize / sliceSize, sliceSize]; if (outputSize === 0) { return backend2.makeTensorInfo(shape, indices.dtype); } - const flattenIndices = reshape4({inputs: {x: indices}, backend: backend2, attrs: {shape: [numUpdates, sliceRank]}}); - const flattenX = reshape4({inputs: {x: updates}, backend: backend2, attrs: {shape: [numUpdates, 
sliceSize]}}); + const flattenIndices = reshape4({ inputs: { x: indices }, backend: backend2, attrs: { shape: [numUpdates, sliceRank] } }); + const flattenX = reshape4({ inputs: { x: updates }, backend: backend2, attrs: { shape: [numUpdates, sliceSize] } }); const defaultValue = backend2.makeTensorInfo([], "float32", new Float32Array([0])); const program = new ScatterProgram(numUpdates, sliceRank, flattenIndices.shape.length, flattenX.shape.length, strides, flattenShape); const res = backend2.runWebGLProgram(program, [flattenX, flattenIndices, defaultValue], flattenX.dtype); - const reshaped = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape}}); + const reshaped = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape } }); backend2.disposeIntermediateTensorInfo(flattenIndices); backend2.disposeIntermediateTensorInfo(flattenX); backend2.disposeIntermediateTensorInfo(res); @@ -53263,8 +53263,8 @@ var SelectProgram = class { } }; function select2(args) { - const {inputs, backend: backend2} = args; - const {condition, t, e} = inputs; + const { inputs, backend: backend2 } = args; + const { condition, t, e } = inputs; const program = new SelectProgram(condition.shape.length, t.shape, t.shape.length); return backend2.runWebGLProgram(program, [condition, t, e], upcastType(t.dtype, e.dtype)); } @@ -53280,14 +53280,14 @@ var SELU = ` float scale = ${backend_util_exports.SELU_SCALE}; return (x >= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0); `; -var selu3 = unaryKernelFunc2({opSnippet: SELU}); +var selu3 = unaryKernelFunc2({ opSnippet: SELU }); var seluConfig2 = { kernelName: Selu, backendName: "webgl", kernelFunc: selu3 }; var SIGMOID3 = `return 1.0 / (1.0 + exp(-1.0 * x));`; -var sigmoid3 = unaryKernelFunc2({opSnippet: SIGMOID3}); +var sigmoid3 = unaryKernelFunc2({ opSnippet: SIGMOID3 }); var sigmoidConfig2 = { kernelName: Sigmoid, backendName: "webgl", @@ -53297,7 +53297,7 @@ var SIGN = ` if (isnan(x)) { return 0.0; } return sign(x); `; -var sign3 = unaryKernelFunc2({opSnippet: SIGN}); +var sign3 = unaryKernelFunc2({ opSnippet: SIGN }); var signConfig2 = { kernelName: Sign, backendName: "webgl", @@ -53306,7 +53306,7 @@ var signConfig2 = { var SIN = CHECK_NAN_SNIPPET_UNARY + ` return sin(x); `; -var sin3 = unaryKernelFunc2({opSnippet: SIN}); +var sin3 = unaryKernelFunc2({ opSnippet: SIN }); var sinConfig2 = { kernelName: Sin, backendName: "webgl", @@ -53316,7 +53316,7 @@ var SINH = ` float e2x = exp(x); return (e2x - 1.0 / e2x) / 2.0; `; -var sinh3 = unaryKernelFunc2({opSnippet: SINH}); +var sinh3 = unaryKernelFunc2({ opSnippet: SINH }); var sinhConfig2 = { kernelName: Sinh, backendName: "webgl", @@ -53343,16 +53343,16 @@ var SOFTPLUS = ` } return result; `; -var softplus3 = unaryKernelFunc2({opSnippet: SOFTPLUS}); +var softplus3 = unaryKernelFunc2({ opSnippet: SOFTPLUS }); var softplusConfig2 = { kernelName: Softplus, backendName: "webgl", kernelFunc: softplus3 }; var spaceToBatchND3 = (args) => { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {blockShape, paddings} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { blockShape, paddings } = attrs; util_exports.assert(x.shape.length <= 4, () => "spaceToBatchND for rank > 4 with a WebGL backend not implemented yet"); const prod5 = blockShape.reduce((a, b) => a * b); const completePaddings = [[0, 0]]; @@ -53362,20 +53362,20 @@ var spaceToBatchND3 = (args) => { } const toDispose = []; const paddedX = padV22({ - inputs: {x}, + inputs: { x }, backend: 
backend2, - attrs: {paddings: completePaddings, constantValue: 0} + attrs: { paddings: completePaddings, constantValue: 0 } }); const reshapedPaddedShape = backend_util_exports.getReshaped(paddedX.shape, blockShape, prod5, false); const permutedReshapedPaddedPermutation = backend_util_exports.getPermuted(reshapedPaddedShape.length, blockShape.length, false); const flattenShape = backend_util_exports.getReshapedPermuted(paddedX.shape, blockShape, prod5, false); - const reshapedPaddedX = reshape4({inputs: {x: paddedX}, backend: backend2, attrs: {shape: reshapedPaddedShape}}); + const reshapedPaddedX = reshape4({ inputs: { x: paddedX }, backend: backend2, attrs: { shape: reshapedPaddedShape } }); const paddedXT = transpose3({ - inputs: {x: reshapedPaddedX}, + inputs: { x: reshapedPaddedX }, backend: backend2, - attrs: {perm: permutedReshapedPaddedPermutation} + attrs: { perm: permutedReshapedPaddedPermutation } }); - const result = reshape4({inputs: {x: paddedXT}, backend: backend2, attrs: {shape: flattenShape}}); + const result = reshape4({ inputs: { x: paddedXT }, backend: backend2, attrs: { shape: flattenShape } }); toDispose.push(paddedX); toDispose.push(reshapedPaddedX); toDispose.push(paddedXT); @@ -53388,8 +53388,8 @@ var spaceToBatchNDConfig2 = { kernelFunc: spaceToBatchND3 }; function sparseFillEmptyRows3(args) { - const {inputs, backend: backend2} = args; - const {indices, values, denseShape, defaultValue} = inputs; + const { inputs, backend: backend2 } = args; + const { indices, values, denseShape, defaultValue } = inputs; if (denseShape.shape.length !== 1) { throw new Error(`Dense shape must be a vector, saw: ${denseShape.shape}`); @@ -53424,8 +53424,8 @@ var sparseFillEmptyRowsConfig2 = { kernelFunc: sparseFillEmptyRows3 }; function sparseReshape3(args) { - const {inputs, backend: backend2} = args; - const {inputIndices, inputShape, newShape} = inputs; + const { inputs, backend: backend2 } = args; + const { inputIndices, inputShape, newShape } = inputs; if (inputIndices.shape.length !== 2) { throw new Error(`Input indices should be a matrix but received shape ${inputIndices.shape}`); } @@ -53450,14 +53450,14 @@ var sparseReshapeConfig2 = { kernelFunc: sparseReshape3 }; function sparseToDense3(args) { - const {inputs, backend: backend2, attrs} = args; - const {sparseIndices, sparseValues, defaultValue} = inputs; - const {outputShape} = attrs; - const {sliceRank, numUpdates, strides, outputSize} = backend_util_exports.calculateShapes(sparseValues, sparseIndices, outputShape); + const { inputs, backend: backend2, attrs } = args; + const { sparseIndices, sparseValues, defaultValue } = inputs; + const { outputShape } = attrs; + const { sliceRank, numUpdates, strides, outputSize } = backend_util_exports.calculateShapes(sparseValues, sparseIndices, outputShape); const sumDupeIndices = false; const program = new ScatterProgram(numUpdates, sliceRank, sparseIndices.shape.length, sparseValues.shape.length, strides, [outputSize, 1], sumDupeIndices); const res = backend2.runWebGLProgram(program, [sparseValues, sparseIndices, defaultValue], sparseValues.dtype); - const reshaped = reshape4({inputs: {x: res}, backend: backend2, attrs: {shape: outputShape}}); + const reshaped = reshape4({ inputs: { x: res }, backend: backend2, attrs: { shape: outputShape } }); backend2.disposeIntermediateTensorInfo(res); return reshaped; } @@ -53467,9 +53467,9 @@ var sparseToDenseConfig2 = { kernelFunc: sparseToDense3 }; function splitV2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = 
inputs; - const {numOrSizeSplits, axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { numOrSizeSplits, axis } = attrs; const $axis = util_exports.parseAxisParam(axis, x.shape)[0]; const splitSizes = backend_util_exports.prepareSplitSize(x, numOrSizeSplits, $axis); const xRank = x.shape.length; @@ -53478,7 +53478,7 @@ function splitV2(args) { return splitSizes.map((s) => { const sliceSize = [...size]; sliceSize[$axis] = s; - const sliceT = slice3({inputs: {x}, backend: backend2, attrs: {begin, size: sliceSize}}); + const sliceT = slice3({ inputs: { x }, backend: backend2, attrs: { begin, size: sliceSize } }); begin[$axis] += s; return sliceT; }); @@ -53489,28 +53489,28 @@ var splitVConfig2 = { kernelFunc: splitV2 }; var SQRT = `return sqrt(x);`; -var sqrt3 = unaryKernelFunc2({opSnippet: SQRT}); +var sqrt3 = unaryKernelFunc2({ opSnippet: SQRT }); var sqrtConfig2 = { kernelName: Sqrt, backendName: "webgl", kernelFunc: sqrt3 }; var SQUARE = `return x * x;`; -var square3 = unaryKernelFunc2({opSnippet: SQUARE}); +var square3 = unaryKernelFunc2({ opSnippet: SQUARE }); var squareConfig2 = { kernelName: Square, backendName: "webgl", kernelFunc: square3 }; var SQUARED_DIFFERENCE = "return (a - b) * (a - b);"; -var squaredDifference3 = binaryKernelFunc2({opSnippet: SQUARED_DIFFERENCE, packedOpSnippet: SQUARED_DIFFERENCE}); +var squaredDifference3 = binaryKernelFunc2({ opSnippet: SQUARED_DIFFERENCE, packedOpSnippet: SQUARED_DIFFERENCE }); var squaredDifferenceConfig2 = { kernelName: SquaredDifference, backendName: "webgl", kernelFunc: squaredDifference3 }; -function step3({inputs, attrs, backend: backend2}) { - const {x} = inputs; +function step3({ inputs, attrs, backend: backend2 }) { + const { x } = inputs; const opSnippet = CHECK_NAN_SNIPPET + ` return x > 0.0 ? 
1.0 : float(${attrs.alpha}); `; @@ -53551,15 +53551,15 @@ var StridedSliceProgram = class { } }; function stridedSlice3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask} = attrs; - const {nonStrided, $begin, $strides, size, newShape, outShape} = slice_util_exports.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask); - const $x = reshape4({inputs: {x}, backend: backend2, attrs: {shape: newShape}}); + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask } = attrs; + const { nonStrided, $begin, $strides, size, newShape, outShape } = slice_util_exports.sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask); + const $x = reshape4({ inputs: { x }, backend: backend2, attrs: { shape: newShape } }); let result; if (nonStrided) { - const sliced = slice3({inputs: {x: $x}, backend: backend2, attrs: {begin: $begin, size}}); - result = reshape4({inputs: {x: sliced}, backend: backend2, attrs: {shape: outShape}}); + const sliced = slice3({ inputs: { x: $x }, backend: backend2, attrs: { begin: $begin, size } }); + result = reshape4({ inputs: { x: sliced }, backend: backend2, attrs: { shape: outShape } }); backend2.disposeIntermediateTensorInfo(sliced); } else if (outShape.some((axis) => axis === 0)) { result = backend2.makeTensorInfo(outShape, x.dtype, []); @@ -53576,7 +53576,7 @@ function stridedSlice3(args) { result = backend2.runWebGLProgram(program, [$x], $x.dtype); } } - const resultReshaped = reshape4({inputs: {x: result}, backend: backend2, attrs: {shape: outShape}}); + const resultReshaped = reshape4({ inputs: { x: result }, backend: backend2, attrs: { shape: outShape } }); backend2.disposeIntermediateTensorInfo($x); backend2.disposeIntermediateTensorInfo(result); return resultReshaped; @@ -53587,7 +53587,7 @@ var stridedSliceConfig2 = { kernelFunc: stridedSlice3 }; var TAN = `return tan(x);`; -var tan3 = unaryKernelFunc2({opSnippet: TAN}); +var tan3 = unaryKernelFunc2({ opSnippet: TAN }); var tanConfig2 = { kernelName: Tan, backendName: "webgl", @@ -53597,7 +53597,7 @@ var TANH = ` float e2x = exp(-2.0 * abs(x)); return sign(x) * (1.0 - e2x) / (1.0 + e2x); `; -var tanh4 = unaryKernelFunc2({opSnippet: TANH}); +var tanh4 = unaryKernelFunc2({ opSnippet: TANH }); var tanhConfig2 = { kernelName: Tanh, backendName: "webgl", @@ -53638,9 +53638,9 @@ function getSourceCoords3(aShape) { return sourceCoords.join(); } function tile4(params) { - const {inputs, backend: backend2, attrs} = params; - const {x} = inputs; - const {reps} = attrs; + const { inputs, backend: backend2, attrs } = params; + const { x } = inputs; + const { reps } = attrs; if (x.dtype === "string" || x.shape.length > 5) { const data = backend2.readSync(x.dataId); const value = x.dtype === "string" ? 
data.map((d) => util_exports.decodeString(d)) : data; @@ -53658,9 +53658,9 @@ var tileConfig2 = { kernelFunc: tile4 }; function topK2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {k, sorted} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { k, sorted } = attrs; const xVals = backend2.readSync(x.dataId); const [allTopKVals, allTopKIndices] = topKImplCPU(xVals, x.shape, x.dtype, k, sorted); return [ @@ -53812,9 +53812,9 @@ var TransformProgram = class { } }; function transform3(args) { - const {inputs, backend: backend2, attrs} = args; - const {image: image3, transforms} = inputs; - const {interpolation, fillMode, fillValue, outputShape} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { image: image3, transforms } = inputs; + const { interpolation, fillMode, fillValue, outputShape } = attrs; const [batch, imageHeight, imageWidth, numChannels] = image3.shape; const [outHeight, outWidth] = outputShape != null ? outputShape : [imageHeight, imageWidth]; const outShape = [ @@ -53832,13 +53832,13 @@ var transformConfig2 = { kernelFunc: transform3 }; function unique4(args) { - const {inputs, attrs, backend: backend2} = args; - const {axis} = attrs; - const {x} = inputs; + const { inputs, attrs, backend: backend2 } = args; + const { axis } = attrs; + const { x } = inputs; assertNotComplex2(x, "unique"); console.warn("WARNING: ", "UI might be locked temporarily as data is being downloaded"); const values = backend2.readSync(x.dataId); - const {outputValues, outputShape, indices} = uniqueImplCPU(values, axis, x.shape, x.dtype); + const { outputValues, outputShape, indices } = uniqueImplCPU(values, axis, x.shape, x.dtype); return [ backend2.makeTensorInfo(outputShape, x.dtype, outputValues), backend2.makeTensorInfo([indices.length], "int32", indices) @@ -53850,9 +53850,9 @@ var uniqueConfig2 = { kernelFunc: unique4 }; function unpack2(args) { - const {inputs, backend: backend2, attrs} = args; - const {value} = inputs; - let {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { value } = inputs; + let { axis } = attrs; if (axis < 0) { axis += value.shape.length; } @@ -53873,8 +53873,8 @@ function unpack2(args) { const res = new Array(num); for (let i = 0; i < res.length; i++) { begin[axis] = i; - const sliced = slice3({inputs: {x}, backend: backend2, attrs: {begin, size}}); - const reshaped = reshape4({inputs: {x: sliced}, backend: backend2, attrs: {shape: outShape}}); + const sliced = slice3({ inputs: { x }, backend: backend2, attrs: { begin, size } }); + const reshaped = reshape4({ inputs: { x: sliced }, backend: backend2, attrs: { shape: outShape } }); res[i] = reshaped; toDispose.push(sliced); } @@ -54018,29 +54018,29 @@ var SegmentOpProgram = class { } }; function unsortedSegmentSum3(args) { - const {inputs, backend: backend2, attrs} = args; - const {x, segmentIds} = inputs; - const {numSegments} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x, segmentIds } = inputs; + const { numSegments } = attrs; const xRank = x.shape.length; const toDispose = []; let axis = 0; const permutation = backend_util_exports.getAxesPermutation([axis], xRank); let permutedX = x; if (permutation != null) { - permutedX = transpose3({inputs: {x}, backend: backend2, attrs: {perm: permutation}}); + permutedX = transpose3({ inputs: { x }, backend: backend2, attrs: { perm: permutation } }); toDispose.push(permutedX); axis = backend_util_exports.getInnerMostAxes(1, 
xRank)[0]; } const outShape = backend_util_exports.segment_util.computeOutShape(permutedX.shape, axis, numSegments); const inSize = util_exports.sizeFromShape([permutedX.shape[axis]]); - const a2D = reshape4({inputs: {x: permutedX}, backend: backend2, attrs: {shape: [-1, inSize]}}); + const a2D = reshape4({ inputs: { x: permutedX }, backend: backend2, attrs: { shape: [-1, inSize] } }); toDispose.push(a2D); const outputDType = sumOutType(x.dtype); const segOpCompute = (x2, segOpType, segmentIds2, dtype, numSegments2) => { const batchSize = x2.shape[0]; const inSize2 = x2.shape[1]; const windowSize = backend_util_exports.segment_util.segOpComputeOptimalWindowSize(inSize2, numSegments2); - const segOpInfo = {windowSize, inSize: inSize2, batchSize, numSegments: numSegments2}; + const segOpInfo = { windowSize, inSize: inSize2, batchSize, numSegments: numSegments2 }; const program = new SegmentOpProgram(segOpInfo, segOpType); const output = backend2.compileAndRun(program, [x2, segmentIds2], dtype); toDispose.push(output); @@ -54049,12 +54049,12 @@ function unsortedSegmentSum3(args) { } const rangeInfo = range4({ backend: backend2, - attrs: {start: 0, stop: numSegments2, step: 1, dtype: "float32"} + attrs: { start: 0, stop: numSegments2, step: 1, dtype: "float32" } }); const tileInfo = tile4({ - inputs: {x: rangeInfo}, + inputs: { x: rangeInfo }, backend: backend2, - attrs: {reps: [inSize2 / windowSize]} + attrs: { reps: [inSize2 / windowSize] } }); toDispose.push(rangeInfo); toDispose.push(tileInfo); @@ -54062,12 +54062,12 @@ function unsortedSegmentSum3(args) { return result2; }; const segOpResult = segOpCompute(a2D, "unsortedSegmentSum", segmentIds, outputDType, numSegments); - const reshaped = reshape4({inputs: {x: segOpResult}, backend: backend2, attrs: {shape: outShape}}); + const reshaped = reshape4({ inputs: { x: segOpResult }, backend: backend2, attrs: { shape: outShape } }); let result = reshaped; if (permutation != null) { toDispose.push(reshaped); const perm = backend_util_exports.getUndoAxesPermutation(permutation); - result = transpose3({inputs: {x: result}, backend: backend2, attrs: {perm}}); + result = transpose3({ inputs: { x: result }, backend: backend2, attrs: { perm } }); } toDispose.forEach((t) => backend2.disposeIntermediateTensorInfo(t)); return result; @@ -54284,12 +54284,12 @@ function setup(backend2) { ]); } function fusedBatchMatMul(args) { - const {inputs, backend: backend2, attrs} = args; - const {a, b, bias, preluActivationWeights} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { a, b, bias, preluActivationWeights } = inputs; if (a.dtype !== "float32" || b.dtype !== "float32") { throw new Error(`_FusedMatMul for non non-float32 tensors not yet supported.`); } - const {transposeA, transposeB, activation: activation2, leakyreluAlpha} = attrs; + const { transposeA, transposeB, activation: activation2, leakyreluAlpha } = attrs; const aId = backend2.dataIdMap.get(a.dataId).id; const bId = backend2.dataIdMap.get(b.dataId).id; let biasId = 0; @@ -54327,7 +54327,7 @@ function createUnaryKernelConfig(kernelName) { wasmFunc9 = backend2.wasm.cwrap(kernelName, null, ["number", "number"]); } function kernelFunc3(args) { - const {backend: backend2, inputs: {x}} = args; + const { backend: backend2, inputs: { x } } = args; const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(x.shape, x.dtype); const outId = backend2.dataIdMap.get(out.dataId).id; @@ -54337,7 +54337,7 @@ function createUnaryKernelConfig(kernelName) { wasmFunc9(xId, 
outId); return out; } - return {kernelName, backendName: "wasm", setupFunc: setupFunc3, kernelFunc: kernelFunc3}; + return { kernelName, backendName: "wasm", setupFunc: setupFunc3, kernelFunc: kernelFunc3 }; } var absConfig3 = createUnaryKernelConfig(Abs); function createBinaryKernelConfig(kernelName, supportsFullBroadcast17, dtype) { @@ -54355,8 +54355,8 @@ function createBinaryKernelConfig(kernelName, supportsFullBroadcast17, dtype) { ]); } function kernelFunc3(args) { - const {backend: backend2, inputs} = args; - const {a, b} = inputs; + const { backend: backend2, inputs } = args; + const { a, b } = inputs; const aId = backend2.dataIdMap.get(a.dataId).id; const bId = backend2.dataIdMap.get(b.dataId).id; const outputType = dtype != null ? dtype : a.dtype; @@ -54384,7 +54384,7 @@ function createBinaryKernelConfig(kernelName, supportsFullBroadcast17, dtype) { throw new Error(`Broadcasting along outer dims is not yet supported for ${a.dtype} ${kernelName}.`); } } - return {kernelName, backendName: "wasm", setupFunc: setupFunc3, kernelFunc: kernelFunc3}; + return { kernelName, backendName: "wasm", setupFunc: setupFunc3, kernelFunc: kernelFunc3 }; } var supportsFullBroadcast = true; var addConfig3 = createBinaryKernelConfig(Add, supportsFullBroadcast); @@ -54398,7 +54398,7 @@ function setupFunc(backend2) { ]); } function addn(args) { - const {inputs, backend: backend2} = args; + const { inputs, backend: backend2 } = args; const out = backend2.makeOutput(inputs[0].shape, inputs[0].dtype); if (util_exports.sizeFromShape(out.shape) === 0) { return out; @@ -54416,7 +54416,7 @@ var addNConfig3 = { kernelFunc: addn }; function identity4(args) { - const {inputs: {x}, backend: backend2} = args; + const { inputs: { x }, backend: backend2 } = args; const out = backend2.makeOutput(x.shape, x.dtype); const inVals = backend2.typedArrayFromHeap(x); const outVals = backend2.typedArrayFromHeap(out); @@ -54441,7 +54441,7 @@ function setup2(backend2) { ]); } function transpose4(args) { - const {inputs, backend: backend2, attrs} = args; + const { inputs, backend: backend2, attrs } = args; const [reducedShape, perm] = removeOneSizeDims(inputs.x.shape, attrs.perm); let permIsNoOp = true; for (let i = 0; i < perm.length; i++) { @@ -54456,7 +54456,7 @@ function transpose4(args) { dtype: inputs.x.dtype }; if (permIsNoOp) { - const cloned = identity4({inputs, backend: backend2}); + const cloned = identity4({ inputs, backend: backend2 }); cloned.shape = outShape; return cloned; } @@ -54517,27 +54517,27 @@ function permuteAxesAndTranspose(x, axis, backend2) { newShape[i] = xShape[permutedAxes[i]]; } axes = backend_util_exports.getInnerMostAxes(axes.length, xRank); - xTransposed = transpose4({inputs: {x}, attrs: {perm: permutedAxes}, backend: backend2}); + xTransposed = transpose4({ inputs: { x }, attrs: { perm: permutedAxes }, backend: backend2 }); const xId = backend2.dataIdMap.get(x.dataId).id; const transposedId = backend2.dataIdMap.get(xTransposed.dataId).id; if (transposedId !== xId) { inputWasTransposed = true; } } - return {transposed: xTransposed, originalAxes, axes, inputWasTransposed}; + return { transposed: xTransposed, originalAxes, axes, inputWasTransposed }; } var wasmAll; function setup3(backend2) { wasmAll = backend2.wasm.cwrap(All, null, ["number, number, number"]); } function all4(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = 
inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; input2 = transposed; @@ -54572,13 +54572,13 @@ function setup4(backend2) { wasmAny = backend2.wasm.cwrap(Any, null, ["number, number, number"]); } function any4(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; input2 = transposed; @@ -54619,13 +54619,13 @@ function setup5(backend2) { ]); } function argmax(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; if (transposedId !== xId) { @@ -54670,10 +54670,10 @@ function setup6(backend2) { ]); } function avgPool4(args) { - const {inputs, attrs, backend: backend2} = args; + const { inputs, attrs, backend: backend2 } = args; const x = inputs.x; const xId = backend2.dataIdMap.get(x.dataId).id; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, pad: pad3, dimRoundingMode } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode); const filterHeight = convInfo.filterHeight; const filterWidth = convInfo.filterWidth; @@ -54702,14 +54702,14 @@ var avgPoolConfig3 = { kernelFunc: avgPool4 }; function reshape5(args) { - const {inputs, attrs} = args; - const {x} = inputs; - const {shape} = attrs; + const { inputs, attrs } = args; + const { x } = inputs; + const { shape } = attrs; const xSize = util_exports.sizeFromShape(x.shape); const $shape = util_exports.inferFromImplicitShape(shape, xSize); util_exports.assert(xSize === util_exports.sizeFromShape($shape), () => `new shape: ${$shape}, old shape: ${x.shape}. 
New shape and old shape must have the same number of elements.`); args.backend.incRef(x.dataId); - return {dataId: x.dataId, shape: $shape, dtype: x.dtype}; + return { dataId: x.dataId, shape: $shape, dtype: x.dtype }; } var reshapeConfig3 = { kernelName: Reshape, @@ -54731,9 +54731,9 @@ function setup7(backend2) { ]); } function batchMatMul3(args) { - const {inputs, backend: backend2, attrs} = args; - const {a, b} = inputs; - const {transposeA, transposeB} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { a, b } = inputs; + const { transposeA, transposeB } = attrs; if (a.dtype !== "float32" || b.dtype !== "float32") { throw new Error(`BatchMatMul for non non-float32 tensors not yet supported.`); } @@ -54754,8 +54754,8 @@ function batchMatMul3(args) { util_exports.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (${innerShapeB}) of Tensors with shapes ${a.shape} and ${b.shape} and transposeA=${transposeA} and transposeB=${transposeB} must match.`); const a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] : [batchDimA, outerShapeA, innerShapeA]; const b3dShape = transposeB ? [batchDimB, outerShapeB, innerShapeB] : [batchDimB, innerShapeB, outerShapeB]; - const a3d = reshape5({inputs: {x: a}, backend: backend2, attrs: {shape: a3dShape}}); - const b3d = reshape5({inputs: {x: b}, backend: backend2, attrs: {shape: b3dShape}}); + const a3d = reshape5({ inputs: { x: a }, backend: backend2, attrs: { shape: a3dShape } }); + const b3d = reshape5({ inputs: { x: b }, backend: backend2, attrs: { shape: b3dShape } }); const a3dId = backend2.dataIdMap.get(a3d.dataId).id; const b3dId = backend2.dataIdMap.get(b3d.dataId).id; const leftDim = transposeA ? a3d.shape[2] : a3d.shape[1]; @@ -54778,7 +54778,7 @@ var batchMatMulConfig3 = { kernelFunc: batchMatMul3 }; function cast5(args) { - const {inputs: {x}, attrs: {dtype}, backend: backend2} = args; + const { inputs: { x }, attrs: { dtype }, backend: backend2 } = args; const out = backend2.makeOutput(x.shape, dtype); const inVals = backend2.typedArrayFromHeap(x); const outVals = backend2.typedArrayFromHeap(out); @@ -54801,9 +54801,9 @@ function setup8(backend2) { ]); } function clip2(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {clipValueMin, clipValueMax} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { clipValueMin, clipValueMax } = attrs; const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(x.shape, x.dtype); const outId = backend2.dataIdMap.get(out.dataId).id; @@ -54884,12 +54884,12 @@ function sliceImpl2(vals, begin, size, shape, dtype) { return outBuf.values; } function concat4(args) { - const {inputs, backend: backend2} = args; + const { inputs, backend: backend2 } = args; const axis = util_exports.parseAxisParam(args.attrs.axis, inputs[0].shape)[0]; let outShape = backend_util_exports.computeOutShape(inputs.map((t) => t.shape), axis); const $inputs = inputs.filter((t) => util_exports.sizeFromShape(t.shape) > 0); if ($inputs.length === 1) { - return identity4({inputs: {x: $inputs[0]}, backend: backend2}); + return identity4({ inputs: { x: $inputs[0] }, backend: backend2 }); } const out = backend2.makeOutput(outShape, inputs[0].dtype); if (util_exports.sizeFromShape(outShape) === 0) { @@ -54901,10 +54901,10 @@ function concat4(args) { const inputs2D = $inputs.map((t) => { const innerSize = util_exports.sizeFromShape(t.shape.slice(axis)); const shape = [-1, 
innerSize]; - return reshape5({inputs: {x: t}, backend: backend2, attrs: {shape}}); + return reshape5({ inputs: { x: t }, backend: backend2, attrs: { shape } }); }); const inputsValShapes = inputs2D.map((t) => { - return {vals: backend2.readSync(t.dataId), shape: t.shape}; + return { vals: backend2.readSync(t.dataId), shape: t.shape }; }); outShape = backend_util_exports.computeOutShape(inputs2D.map((t) => t.shape), 1); const simplyConcat = inputs2D[0].shape[0] === 1; @@ -54967,11 +54967,11 @@ function setup9(backend2) { ]); } function conv2d5(args) { - const {inputs, attrs, backend: backend2} = args; - const {x, filter} = inputs; + const { inputs, attrs, backend: backend2 } = args; + const { x, filter } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; const filterId = backend2.dataIdMap.get(filter.dataId).id; - const {strides, dilations, pad: pad3, dimRoundingMode, dataFormat} = attrs; + const { strides, dilations, pad: pad3, dimRoundingMode, dataFormat } = attrs; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode, false, $dataFormat); const filterHeight = convInfo.filterHeight; @@ -55034,13 +55034,13 @@ function setup10(backend2) { ]); } function conv2DBackpropInput4(args) { - const {backend: backend2, inputs, attrs} = args; - const {dy, filter} = inputs; - const {strides, pad: pad3, dataFormat, dimRoundingMode, inputShape} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { dy, filter } = inputs; + const { strides, pad: pad3, dataFormat, dimRoundingMode, inputShape } = attrs; const dilations = 1; const $dataFormat = backend_util_exports.convertConv2DDataFormat(dataFormat); const convInfo = backend_util_exports.computeConv2DInfo(inputShape, filter.shape, strides, dilations, pad3, dimRoundingMode, false, $dataFormat); - const {batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth} = convInfo; + const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo; const topPad = filterHeight - 1 - convInfo.padInfo.top; const leftPad = filterWidth - 1 - convInfo.padInfo.left; const isChannelsLast = convInfo.dataFormat === "channelsLast"; @@ -55090,16 +55090,16 @@ function setup11(backend2) { ]); } function cropAndResize4(args) { - const {backend: backend2, inputs, attrs} = args; - const {method, extrapolationValue, cropSize} = attrs; - const {image: image3, boxes, boxInd} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { method, extrapolationValue, cropSize } = attrs; + const { image: image3, boxes, boxInd } = inputs; const numBoxes = boxes.shape[0]; const [cropHeight, cropWidth] = cropSize; const outShape = [numBoxes, cropHeight, cropWidth, image3.shape[3]]; let imagesData = backend2.dataIdMap.get(image3.dataId); let castedData; if (image3.dtype !== "float32") { - castedData = cast5({backend: backend2, inputs: {x: image3}, attrs: {dtype: "float32"}}); + castedData = cast5({ backend: backend2, inputs: { x: image3 }, attrs: { dtype: "float32" } }); imagesData = backend2.dataIdMap.get(castedData.dataId); } const imagesId = imagesData.id; @@ -55132,15 +55132,15 @@ function setup12(backend2) { ]); } function cumsum4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {axis, exclusive, reverse: reverse5} = 
attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse: reverse5 } = attrs; const xRank = x.shape.length; util_exports.assert(x.dtype === "float32" || x.dtype === "int32", () => `cumsum does not support ${x.dtype} tensors in the WASM backend`); const permutation = backend_util_exports.getAxesPermutation([axis], xRank); let permutedX = x; if (permutation !== null) { - permutedX = transpose4({inputs: {x}, attrs: {perm: permutation}, backend: backend2}); + permutedX = transpose4({ inputs: { x }, attrs: { perm: permutation }, backend: backend2 }); } const permutedAxis = backend_util_exports.getInnerMostAxes(1, xRank)[0]; backend_util_exports.assertAxesAreInnerMostDims("cumsum", [permutedAxis], xRank); @@ -55152,7 +55152,7 @@ function cumsum4(args) { let out = permutedOut; if (permutation !== null) { const undoPermutation = backend_util_exports.getUndoAxesPermutation(permutation); - out = transpose4({inputs: {x: permutedOut}, attrs: {perm: undoPermutation}, backend: backend2}); + out = transpose4({ inputs: { x: permutedOut }, attrs: { perm: undoPermutation }, backend: backend2 }); backend2.disposeData(permutedX.dataId); backend2.disposeData(permutedOut.dataId); } @@ -55179,9 +55179,9 @@ function setup13(backend2) { ]); } function depthToSpace4(args) { - const {backend: backend2, inputs, attrs} = args; - const {x} = inputs; - const {blockSize, dataFormat} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { x } = inputs; + const { blockSize, dataFormat } = attrs; util_exports.assert(blockSize > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`); const batchSize = x.shape[0]; const inputHeight = dataFormat === "NHWC" ? x.shape[1] : x.shape[2]; @@ -55233,11 +55233,11 @@ function setup14(backend2) { ]); } function depthwiseConv2d5(args) { - const {inputs, attrs, backend: backend2} = args; - const {x, filter} = inputs; + const { inputs, attrs, backend: backend2 } = args; + const { x, filter } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; const filterId = backend2.dataIdMap.get(filter.dataId).id; - const {strides, dilations, pad: pad3, dimRoundingMode} = attrs; + const { strides, dilations, pad: pad3, dimRoundingMode } = attrs; const $dilations = dilations == null ? 
[1, 1] : dilations; const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad3, dimRoundingMode, true); const filterHeight = convInfo.filterHeight; @@ -55271,9 +55271,9 @@ var supportsFullBroadcast2 = false; var equalConfig3 = createBinaryKernelConfig(Equal, supportsFullBroadcast2, "bool"); var expConfig3 = createUnaryKernelConfig(Exp); function expandDims5(args) { - const {inputs, attrs, backend: backend2} = args; - const {input: input2} = inputs; - const {dim} = attrs; + const { inputs, attrs, backend: backend2 } = args; + const { input: input2 } = inputs; + const { dim } = attrs; const inputRank = input2.shape.length; const newShape = input2.shape.slice(); let $dim = dim; @@ -55282,7 +55282,7 @@ function expandDims5(args) { $dim = inputRank + dim + 1; } newShape.splice($dim, 0, 1); - return reshape5({inputs: {x: input2}, backend: backend2, attrs: {shape: newShape}}); + return reshape5({ inputs: { x: input2 }, backend: backend2, attrs: { shape: newShape } }); } var expandDimsConfig3 = { kernelName: ExpandDims, @@ -55290,7 +55290,7 @@ var expandDimsConfig3 = { kernelFunc: expandDims5 }; function fill4(args) { - const {attrs: {shape, value, dtype}, backend: backend2} = args; + const { attrs: { shape, value, dtype }, backend: backend2 } = args; const out = backend2.makeOutput(shape, dtype); const outVals = backend2.typedArrayFromHeap(out); outVals.fill(value); @@ -55313,8 +55313,8 @@ function setup15(backend2) { ]); } function flipLeftRight2(args) { - const {inputs, backend: backend2} = args; - const {image: image3} = inputs; + const { inputs, backend: backend2 } = args; + const { image: image3 } = inputs; const out = backend2.makeOutput(image3.shape, image3.dtype); const imageId = backend2.dataIdMap.get(image3.dataId).id; const outId = backend2.dataIdMap.get(out.dataId).id; @@ -55336,9 +55336,9 @@ function setup16(backend2) { wasmBatchNorm = backend2.wasm.cwrap(FusedBatchNorm, null, ["number", "number", "number", "number", "number", "number", "number"]); } function fusedBatchNorm(args) { - const {backend: backend2, inputs, attrs} = args; - const {varianceEpsilon} = attrs; - const {x, mean: mean4, variance, offset, scale: scale22} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { varianceEpsilon } = attrs; + const { x, mean: mean4, variance, offset, scale: scale22 } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; const meanId = backend2.dataIdMap.get(mean4.dataId).id; const varianceId = backend2.dataIdMap.get(variance.dataId).id; @@ -55387,9 +55387,9 @@ function setup17(backend2) { ]); } function fusedConv2d2(args) { - const {inputs, attrs, backend: backend2} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, dilations, dataFormat, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, attrs, backend: backend2 } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dilations, dataFormat, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode); const fusedActivation = FusableActivation[activation2]; if (fusedActivation == null) { @@ -55468,9 +55468,9 @@ function setup18(backend2) { ]); } function fusedDepthwiseConv2d(args) { - const {inputs, attrs, backend: backend2} = args; - const {x, filter, bias, preluActivationWeights} = inputs; - const {strides, pad: pad3, 
dilations, dataFormat, dimRoundingMode, activation: activation2, leakyreluAlpha} = attrs; + const { inputs, attrs, backend: backend2 } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad: pad3, dilations, dataFormat, dimRoundingMode, activation: activation2, leakyreluAlpha } = attrs; const convInfo = backend_util_exports.computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad3, dimRoundingMode, true); const fusedActivation = FusableActivation[activation2]; if (fusedActivation == null) { @@ -55534,8 +55534,8 @@ function setup19(backend2) { ]); } function gatherNd3(args) { - const {backend: backend2, inputs} = args; - const {params, indices} = inputs; + const { backend: backend2, inputs } = args; + const { params, indices } = inputs; const [resultShape, numSlices, sliceSize, strides] = gather_nd_util_exports.prepareAndValidate(params, indices); const out = backend2.makeOutput(resultShape, params.dtype); if (numSlices === 0) { @@ -55572,13 +55572,13 @@ function setup20(backend2) { ]); } function gatherV23(args) { - const {backend: backend2, inputs, attrs} = args; - const {x, indices} = inputs; - const {axis, batchDims} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { x, indices } = inputs; + const { axis, batchDims } = attrs; const parsedAxis = util_exports.parseAxisParam(axis, x.shape)[0]; const shapeInfo = backend_util_exports.segment_util.collectGatherOpShapeInfo(x, indices, parsedAxis, batchDims); const flattenX = reshape5({ - inputs: {x}, + inputs: { x }, attrs: { shape: [ shapeInfo.batchSize, @@ -55591,8 +55591,8 @@ function gatherV23(args) { }); const indicesSize = util_exports.sizeFromShape(indices.shape); const flattenIndex = reshape5({ - inputs: {x: indices}, - attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]}, + inputs: { x: indices }, + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] }, backend: backend2 }); const flattenOutputShape = [ @@ -55638,7 +55638,7 @@ function setupFunc2(backend2) { ]); } function leakyRelu4(args) { - const {inputs: {x}, attrs: {alpha}, backend: backend2} = args; + const { inputs: { x }, attrs: { alpha }, backend: backend2 } = args; const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(x.shape, x.dtype); if (util_exports.sizeFromShape(x.shape) !== 0) { @@ -55665,13 +55665,13 @@ function setup21(backend2) { wasmMax = backend2.wasm.cwrap(Max, null, ["number, number, number"]); } function max5(args) { - const {backend: backend2, inputs, attrs} = args; - const {reductionIndices: axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { reductionIndices: axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; input2 = transposed; @@ -55726,10 +55726,10 @@ function setup22(backend2) { ]); } function maxPool4(args) { - const {inputs, attrs, backend: backend2} = args; + const { inputs, attrs, backend: backend2 } = args; const x = inputs.x; const xId = backend2.dataIdMap.get(x.dataId).id; - const {filterSize, strides, pad: pad3, dimRoundingMode} = attrs; + const { filterSize, strides, 
pad: pad3, dimRoundingMode } = attrs; const convInfo = backend_util_exports.computePool2DInfo(x.shape, filterSize, strides, 1, pad3, dimRoundingMode); const filterHeight = convInfo.filterHeight; const filterWidth = convInfo.filterWidth; @@ -55762,13 +55762,13 @@ function setup23(backend2) { wasmMean = backend2.wasm.cwrap(Mean, null, ["number, number, number"]); } function mean3(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); let reductionAxes = axes; if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; @@ -55783,7 +55783,7 @@ function mean3(args) { const reduceSize = util_exports.sizeFromShape(reduceShape); let castedInput = input2; if (input2.dtype !== "float32") { - castedInput = cast5({backend: backend2, inputs: {x: input2}, attrs: {dtype: "float32"}}); + castedInput = cast5({ backend: backend2, inputs: { x: input2 }, attrs: { dtype: "float32" } }); inputId = backend2.dataIdMap.get(castedInput.dataId).id; } const out = backend2.makeOutput(outShape, "float32"); @@ -55814,13 +55814,13 @@ function setup24(backend2) { wasmMin = backend2.wasm.cwrap(Min, null, ["number, number, number"]); } function min5(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; if (transposedId !== xId) { @@ -55873,7 +55873,7 @@ function setup25(backend2) { ]); } function mirrorPad3(args) { - const {inputs: {x}, backend: backend2, attrs: {paddings, mode}} = args; + const { inputs: { x }, backend: backend2, attrs: { paddings, mode } } = args; const outShape = paddings.map((p2, i) => p2[0] + x.shape[i] + p2[1]); const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(outShape, x.dtype); @@ -55902,7 +55902,7 @@ function parseResultStruct(backend2, resOffset) { const pSelectedScores = result[2]; const pValidOutputs = result[3]; backend2.wasm._free(resOffset); - return {pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs}; + return { pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs }; } var wasmFunc4; function setup26(backend2) { @@ -55915,13 +55915,13 @@ function setup26(backend2) { ]); } function kernelFunc(args) { - const {backend: backend2, inputs, attrs} = args; - const {iouThreshold, maxOutputSize, scoreThreshold} = attrs; - const {boxes, scores} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { iouThreshold, maxOutputSize, scoreThreshold } = attrs; + const { boxes, scores } = inputs; const boxesId = backend2.dataIdMap.get(boxes.dataId).id; const scoresId = 
backend2.dataIdMap.get(scores.dataId).id; const resOffset = wasmFunc4(boxesId, scoresId, maxOutputSize, iouThreshold, scoreThreshold); - const {pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs} = parseResultStruct(backend2, resOffset); + const { pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs } = parseResultStruct(backend2, resOffset); backend2.wasm._free(pSelectedScores); backend2.wasm._free(pValidOutputs); const selectedIndicesTensor = backend2.makeOutput([selectedSize], "int32", pSelectedIndices); @@ -55945,13 +55945,13 @@ function setup27(backend2) { ]); } function nonMaxSuppressionV43(args) { - const {backend: backend2, inputs, attrs} = args; - const {iouThreshold, maxOutputSize, scoreThreshold, padToMaxOutputSize} = attrs; - const {boxes, scores} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { iouThreshold, maxOutputSize, scoreThreshold, padToMaxOutputSize } = attrs; + const { boxes, scores } = inputs; const boxesId = backend2.dataIdMap.get(boxes.dataId).id; const scoresId = backend2.dataIdMap.get(scores.dataId).id; const resOffset = wasmFunc5(boxesId, scoresId, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); - const {pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs} = parseResultStruct(backend2, resOffset); + const { pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs } = parseResultStruct(backend2, resOffset); backend2.wasm._free(pSelectedScores); const selectedIndicesTensor = backend2.makeOutput([selectedSize], "int32", pSelectedIndices); const validOutputsTensor = backend2.makeOutput([], "int32", pValidOutputs); @@ -55975,13 +55975,13 @@ function setup28(backend2) { ]); } function kernelFunc2(args) { - const {backend: backend2, inputs, attrs} = args; - const {iouThreshold, maxOutputSize, scoreThreshold, softNmsSigma} = attrs; - const {boxes, scores} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { iouThreshold, maxOutputSize, scoreThreshold, softNmsSigma } = attrs; + const { boxes, scores } = inputs; const boxesId = backend2.dataIdMap.get(boxes.dataId).id; const scoresId = backend2.dataIdMap.get(scores.dataId).id; const resOffset = wasmFunc6(boxesId, scoresId, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); - const {pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs} = parseResultStruct(backend2, resOffset); + const { pSelectedIndices, selectedSize, pSelectedScores, pValidOutputs } = parseResultStruct(backend2, resOffset); backend2.wasm._free(pValidOutputs); const selectedIndicesTensor = backend2.makeOutput([selectedSize], "int32", pSelectedIndices); const selectedScoresTensor = backend2.makeOutput([selectedSize], "float32", pSelectedScores); @@ -56006,9 +56006,9 @@ function setup29(backend2) { ]); } function oneHot4(args) { - const {inputs, backend: backend2, attrs} = args; - const {indices} = inputs; - const {depth, onValue, offValue} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { indices } = inputs; + const { depth, onValue, offValue } = attrs; const out = backend2.makeOutput([...indices.shape, depth], "int32"); const outId = backend2.dataIdMap.get(out.dataId).id; const indicesData = backend2.dataIdMap.get(indices.dataId); @@ -56023,7 +56023,7 @@ var oneHotConfig3 = { kernelFunc: oneHot4 }; function onesLike4(args) { - const {inputs: {x}, backend: backend2} = args; + const { inputs: { x }, backend: backend2 } = args; const out = backend2.makeOutput(x.shape, x.dtype); const outVals = 
backend2.typedArrayFromHeap(out); outVals.fill(1); @@ -56035,10 +56035,10 @@ var onesLikeConfig3 = { kernelFunc: onesLike4 }; function pack3(args) { - const {inputs, backend: backend2, attrs} = args; - const {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { axis } = attrs; if (inputs.length === 1) { - return expandDims5({inputs: {input: inputs[0]}, backend: backend2, attrs: {dim: axis}}); + return expandDims5({ inputs: { input: inputs[0] }, backend: backend2, attrs: { dim: axis } }); } const shape = inputs[0].shape; const dtype = inputs[0].dtype; @@ -56048,11 +56048,11 @@ function pack3(args) { }); const intermediateTensorInfos = []; const expandedTensors = inputs.map((t) => { - const expandedT = expandDims5({inputs: {input: t}, backend: backend2, attrs: {dim: axis}}); + const expandedT = expandDims5({ inputs: { input: t }, backend: backend2, attrs: { dim: axis } }); intermediateTensorInfos.push(expandedT); return expandedT; }); - const result = concat4({inputs: expandedTensors, backend: backend2, attrs: {axis}}); + const result = concat4({ inputs: expandedTensors, backend: backend2, attrs: { axis } }); intermediateTensorInfos.forEach((t) => backend2.disposeData(t.dataId)); return result; } @@ -56075,7 +56075,7 @@ function setup30(backend2) { ]); } function pad2(args) { - const {inputs: {x}, backend: backend2, attrs: {paddings, constantValue}} = args; + const { inputs: { x }, backend: backend2, attrs: { paddings, constantValue } } = args; const outShape = paddings.map((p2, i) => p2[0] + x.shape[i] + p2[1]); const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(outShape, x.dtype); @@ -56105,8 +56105,8 @@ function setup31(backend2) { ]); } function prelu5(args) { - const {inputs, backend: backend2} = args; - const {x, alpha} = inputs; + const { inputs, backend: backend2 } = args; + const { x, alpha } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; const weightsId = backend2.dataIdMap.get(alpha.dataId).id; const out = backend2.makeOutput(x.shape, "float32"); @@ -56130,13 +56130,13 @@ function setup32(backend2) { ]); } function prod4(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); let reductionAxes = axes; if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; @@ -56170,8 +56170,8 @@ var prodConfig3 = { kernelFunc: prod4 }; var range5 = (args) => { - const {backend: backend2, attrs} = args; - const {start, stop, step: step5, dtype} = attrs; + const { backend: backend2, attrs } = args; + const { start, stop, step: step5, dtype } = attrs; const values = rangeImpl2(start, stop, step5, dtype); const out = backend2.makeOutput([values.length], dtype); const outVals = backend2.typedArrayFromHeap(out); @@ -56203,16 +56203,16 @@ function setup33(backend2) { ]); } function resizeBilinear4(args) { - const {backend: backend2, inputs, attrs} = args; - const {images} = inputs; - const {alignCorners, halfPixelCenters, size} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { images } = inputs; + const { 
alignCorners, halfPixelCenters, size } = attrs; const [newHeight, newWidth] = size; const [batch, oldHeight, oldWidth, numChannels] = images.shape; const outShape = [batch, newHeight, newWidth, numChannels]; let xData = backend2.dataIdMap.get(images.dataId); let castedData; if (xData.dtype !== "float32") { - castedData = cast5({backend: backend2, inputs: {x: images}, attrs: {dtype: "float32"}}); + castedData = cast5({ backend: backend2, inputs: { x: images }, attrs: { dtype: "float32" } }); xData = backend2.dataIdMap.get(castedData.dataId); } const xId = xData.id; @@ -56245,12 +56245,12 @@ function setup34(backend2) { ]); } function reverse4(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; - const {dims} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; + const { dims } = attrs; const axes = util_exports.parseAxisParam(dims, x.shape); if (x.shape.length === 0) { - return identity4({inputs: {x}, backend: backend2}); + return identity4({ inputs: { x }, backend: backend2 }); } const out = backend2.makeOutput(x.shape, x.dtype); const xId = backend2.dataIdMap.get(x.dataId).id; @@ -56258,7 +56258,7 @@ function reverse4(args) { const axesBytes = new Uint8Array(new Int32Array(axes).buffer); const outShapeBytes = new Uint8Array(new Int32Array(x.shape).buffer); wasmReverse(xId, axesBytes, axes.length, outShapeBytes, x.shape.length, outId); - const reshaped = reshape5({inputs: {x: out}, attrs: {shape: x.shape}, backend: backend2}); + const reshaped = reshape5({ inputs: { x: out }, attrs: { shape: x.shape }, backend: backend2 }); backend2.disposeData(out.dataId); return reshaped; } @@ -56285,9 +56285,9 @@ function setup35(backend2) { ]); } function rotateWithOffset2(args) { - const {inputs, backend: backend2, attrs} = args; - const {image: image3} = inputs; - const {radians, fillValue, center} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { image: image3 } = inputs; + const { radians, fillValue, center } = attrs; const out = backend2.makeOutput(image3.shape, image3.dtype); const imageId = backend2.dataIdMap.get(image3.dataId).id; const outId = backend2.dataIdMap.get(out.dataId).id; @@ -56323,14 +56323,14 @@ function setup36(backend2) { ]); } function scatterNd3(args) { - const {backend: backend2, inputs, attrs} = args; - const {indices, updates} = inputs; - const {shape} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { indices, updates } = inputs; + const { shape } = attrs; const out = backend2.makeOutput(shape, updates.dtype); if (util_exports.sizeFromShape(shape) === 0) { return out; } - const {sliceRank, numUpdates, sliceSize, strides, outputSize} = scatter_nd_util_exports.calculateShapes(updates, indices, shape); + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = scatter_nd_util_exports.calculateShapes(updates, indices, shape); const indicesData = backend2.dataIdMap.get(indices.dataId); const indicesId = indicesData.id; const updatesData = backend2.dataIdMap.get(updates.dataId); @@ -56357,8 +56357,8 @@ function setup37(backend2) { ]); } function select3(args) { - const {inputs, backend: backend2} = args; - const {condition, t, e} = inputs; + const { inputs, backend: backend2 } = args; + const { condition, t, e } = inputs; const conditionId = backend2.dataIdMap.get(condition.dataId).id; const tId = backend2.dataIdMap.get(t.dataId).id; const eId = backend2.dataIdMap.get(e.dataId).id; @@ -56381,7 +56381,7 @@ function setup38(backend2) { wasmFunc7 = 
backend2.wasm.cwrap(Sigmoid, null, ["number", "number"]); } function sigmoid4(args) { - const {backend: backend2, inputs: {x}} = args; + const { backend: backend2, inputs: { x } } = args; const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(x.shape, x.dtype); const outId = backend2.dataIdMap.get(out.dataId).id; @@ -56399,7 +56399,7 @@ var sigmoidConfig3 = { }; var sinConfig3 = createUnaryKernelConfig(Sin); function slice4(args) { - const {inputs: {x}, attrs: {begin, size}, backend: backend2} = args; + const { inputs: { x }, attrs: { begin, size }, backend: backend2 } = args; const [begin_, size_] = slice_util_exports.parseSliceParams(x, begin, size); const isContinous = slice_util_exports.isSliceContinous(x.shape, begin_, size_); const xVals = backend2.readSync(x.dataId); @@ -56495,7 +56495,7 @@ function setup39(backend2) { ]); } function softmax5(args) { - const {backend: backend2, inputs: {logits}, attrs: {dim}} = args; + const { backend: backend2, inputs: { logits }, attrs: { dim } } = args; const xId = backend2.dataIdMap.get(logits.dataId).id; const out = backend2.makeOutput(logits.shape, logits.dtype); const outId = backend2.dataIdMap.get(out.dataId).id; @@ -56514,9 +56514,9 @@ var softmaxConfig3 = { kernelFunc: softmax5 }; function splitV3(args) { - const {inputs, attrs, backend: backend2} = args; - const {x} = inputs; - const {numOrSizeSplits, axis} = attrs; + const { inputs, attrs, backend: backend2 } = args; + const { x } = inputs; + const { numOrSizeSplits, axis } = attrs; const $axis = util_exports.parseAxisParam(axis, x.shape)[0]; const splitSizes = backend_util_exports.prepareSplitSize(x, numOrSizeSplits, $axis); const begin = new Array(x.shape.length).fill(0); @@ -56524,7 +56524,7 @@ function splitV3(args) { return splitSizes.map((s) => { const xSliceSize = [...size]; xSliceSize[$axis] = s; - const xSlice = slice4({inputs: {x}, attrs: {begin, size: xSliceSize}, backend: backend2}); + const xSlice = slice4({ inputs: { x }, attrs: { begin, size: xSliceSize }, backend: backend2 }); begin[$axis] += s; return xSlice; }); @@ -56547,9 +56547,9 @@ function setup40(backend2) { ]); } function step4(args) { - const {backend: backend2, inputs, attrs} = args; - const {alpha} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { alpha } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; const out = backend2.makeOutput(x.shape, x.dtype); const outId = backend2.dataIdMap.get(out.dataId).id; @@ -56578,13 +56578,13 @@ function setup41(backend2) { ]); } function stridedSlice4(args) { - const {backend: backend2, inputs, attrs} = args; - const {x} = inputs; - let {begin, end, strides} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { x } = inputs; + let { begin, end, strides } = attrs; if (strides == null) { strides = new Array(begin.length); } - const {beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask} = attrs; + const { beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask } = attrs; const ellipsisAxes = backend_util_exports.slice_util.maskToAxes(ellipsisMask); if (ellipsisAxes.length > 1) { throw new Error("Multiple ellipses in slice is not allowed."); @@ -56603,8 +56603,8 @@ function stridedSlice4(args) { end[axis] = 1; newShape.splice(axis, 0, 1); }); - const xReshaped = reshape5({inputs: {x}, attrs: {shape: newShape}, backend: backend2}); - const {begin: normalizedBegin, end: normalizedEnd, strides: normalizedStrides} = 
backend_util_exports.slice_util.getNormalizedAxes(xReshaped.shape, ellipsisAxes, numInterpolatedAxes, begin, end, strides, beginMask, endMask, ellipsisMask); + const xReshaped = reshape5({ inputs: { x }, attrs: { shape: newShape }, backend: backend2 }); + const { begin: normalizedBegin, end: normalizedEnd, strides: normalizedStrides } = backend_util_exports.slice_util.getNormalizedAxes(xReshaped.shape, ellipsisAxes, numInterpolatedAxes, begin, end, strides, beginMask, endMask, ellipsisMask); begin = normalizedBegin; end = normalizedEnd; strides = normalizedStrides; @@ -56617,9 +56617,9 @@ function stridedSlice4(args) { const outShape = size.filter((_, axis) => shrinkAxes.indexOf(axis) === -1); const nonStrided = strides.every((v) => v === 1); if (nonStrided) { - const xSliced = slice4({inputs: {x: xReshaped}, attrs: {begin, size}, backend: backend2}); + const xSliced = slice4({ inputs: { x: xReshaped }, attrs: { begin, size }, backend: backend2 }); backend2.disposeData(xReshaped.dataId); - const reshaped2 = reshape5({inputs: {x: xSliced}, attrs: {shape: outShape}, backend: backend2}); + const reshaped2 = reshape5({ inputs: { x: xSliced }, attrs: { shape: outShape }, backend: backend2 }); backend2.disposeData(xSliced.dataId); return reshaped2; } @@ -56636,7 +56636,7 @@ function stridedSlice4(args) { wasmStridedSlice(xId, xStridesBytes, xReshaped.shape.length, beginBytes, endBytes, stridesBytes, outputShapeBytes, outStridesBytes, outShape.length, outId); } backend2.disposeData(xReshaped.dataId); - const reshaped = reshape5({inputs: {x: out}, attrs: {shape: outShape}, backend: backend2}); + const reshaped = reshape5({ inputs: { x: out }, attrs: { shape: outShape }, backend: backend2 }); backend2.disposeData(out.dataId); return reshaped; } @@ -56653,13 +56653,13 @@ function setup42(backend2) { wasmSum = backend2.wasm.cwrap(Sum, null, ["number, number, number"]); } function sum5(args) { - const {backend: backend2, inputs, attrs} = args; - const {axis, keepDims} = attrs; - const {x} = inputs; + const { backend: backend2, inputs, attrs } = args; + const { axis, keepDims } = attrs; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; let inputId = xId; let input2 = x; - const {transposed, axes, originalAxes, inputWasTransposed} = permuteAxesAndTranspose(x, axis, backend2); + const { transposed, axes, originalAxes, inputWasTransposed } = permuteAxesAndTranspose(x, axis, backend2); let reductionAxes = axes; if (inputWasTransposed) { const transposedId = backend2.dataIdMap.get(transposed.dataId).id; @@ -56706,10 +56706,10 @@ function setup43(backend2) { ]); } function tile5(args) { - const {inputs, backend: backend2, attrs} = args; - const {x} = inputs; + const { inputs, backend: backend2, attrs } = args; + const { x } = inputs; const xId = backend2.dataIdMap.get(x.dataId).id; - const {reps} = attrs; + const { reps } = attrs; const newShape = new Array(x.shape.length); for (let i = 0; i < newShape.length; i++) { newShape[i] = x.shape[i] * reps[i]; @@ -56740,9 +56740,9 @@ function setup44(backend2) { "number" ]); } -var topk2 = ({inputs, backend: backend2, attrs}) => { - const {x} = inputs; - const {k, sorted} = attrs; +var topk2 = ({ inputs, backend: backend2, attrs }) => { + const { x } = inputs; + const { k, sorted } = attrs; const xId = backend2.dataIdMap.get(x.dataId).id; const xShapeBytes = new Uint8Array(new Int32Array(x.shape).buffer); const outputShape = x.shape.slice(); @@ -56781,9 +56781,9 @@ function setup45(backend2) { ]); } function transform4(args) { - const 
{backend: backend2, inputs, attrs} = args; - const {image: image3, transforms} = inputs; - const {interpolation, fillMode, fillValue, outputShape} = attrs; + const { backend: backend2, inputs, attrs } = args; + const { image: image3, transforms } = inputs; + const { interpolation, fillMode, fillValue, outputShape } = attrs; const [batch, imageHeight, imageWidth, numChannels] = image3.shape; const [outHeight, outWidth] = outputShape != null ? outputShape : [imageHeight, imageWidth]; const outShape = [ @@ -56828,9 +56828,9 @@ var transformConfig3 = { kernelFunc: transform4 }; function unpack3(args) { - const {inputs, backend: backend2, attrs} = args; - const {value} = inputs; - let {axis} = attrs; + const { inputs, backend: backend2, attrs } = args; + const { value } = inputs; + let { axis } = attrs; if (axis < 0) { axis += value.shape.length; } @@ -56849,9 +56849,9 @@ function unpack3(args) { size[axis] = 1; for (let i = 0; i < outs.length; i++) { begin[axis] = i; - outs[i] = slice4({inputs: {x: value}, attrs: {begin, size}, backend: backend2}); + outs[i] = slice4({ inputs: { x: value }, attrs: { begin, size }, backend: backend2 }); } - return outs.map(({dataId, dtype}) => ({dataId, dtype, shape: outShape})); + return outs.map(({ dataId, dtype }) => ({ dataId, dtype, shape: outShape })); } var unpackConfig3 = { kernelName: Unpack, @@ -56859,7 +56859,7 @@ var unpackConfig3 = { kernelFunc: unpack3 }; function zerosLike4(args) { - const {inputs: {x}, backend: backend2} = args; + const { inputs: { x }, backend: backend2 } = args; const out = backend2.makeOutput(x.shape, x.dtype); const outVals = backend2.typedArrayFromHeap(out); outVals.fill(0); @@ -57060,7 +57060,7 @@ var BackendWasm = class extends KernelBackend { this.dataIdMap = new DataStorage(this, engine()); } write(values, shape, dtype) { - const dataId = {id: this.dataIdNextNumber++}; + const dataId = { id: this.dataIdNextNumber++ }; this.move(dataId, values, shape, dtype, 1); return dataId; } @@ -57071,19 +57071,19 @@ var BackendWasm = class extends KernelBackend { const start = util_exports.now(); f(); const kernelMs = util_exports.now() - start; - return {kernelMs}; + return { kernelMs }; } move(dataId, values, shape, dtype, refCount) { const id = this.dataIdNextNumber++; if (dtype === "string") { const stringBytes = values; - this.dataIdMap.set(dataId, {id, stringBytes, shape, dtype, memoryOffset: null, refCount}); + this.dataIdMap.set(dataId, { id, stringBytes, shape, dtype, memoryOffset: null, refCount }); return; } const size = util_exports.sizeFromShape(shape); const numBytes = size * util_exports.bytesPerElement(dtype); const memoryOffset = this.wasm._malloc(numBytes); - this.dataIdMap.set(dataId, {id, memoryOffset, shape, dtype, refCount}); + this.dataIdMap.set(dataId, { id, memoryOffset, shape, dtype, refCount }); this.wasm.tfjs.registerTensor(id, size, memoryOffset); if (values != null) { this.wasm.HEAPU8.set(new Uint8Array(values.buffer, values.byteOffset, numBytes), memoryOffset); @@ -57093,7 +57093,7 @@ var BackendWasm = class extends KernelBackend { return this.readSync(dataId); } readSync(dataId) { - const {memoryOffset, dtype, shape, stringBytes} = this.dataIdMap.get(dataId); + const { memoryOffset, dtype, shape, stringBytes } = this.dataIdMap.get(dataId); if (dtype === "string") { return stringBytes; } @@ -57140,7 +57140,7 @@ var BackendWasm = class extends KernelBackend { this.wasm = null; } memory() { - return {unreliable: false}; + return { unreliable: false }; } makeOutput(shape, dtype, memoryOffset) { let 
dataId; @@ -57148,16 +57148,16 @@ var BackendWasm = class extends KernelBackend { dataId = this.write(null, shape, dtype); } else { const id = this.dataIdNextNumber++; - dataId = {id}; - this.dataIdMap.set(dataId, {id, memoryOffset, shape, dtype, refCount: 1}); + dataId = { id }; + this.dataIdMap.set(dataId, { id, memoryOffset, shape, dtype, refCount: 1 }); const size = util_exports.sizeFromShape(shape); this.wasm.tfjs.registerTensor(id, size, memoryOffset); } - return {dataId, shape, dtype}; + return { dataId, shape, dtype }; } - typedArrayFromHeap({shape, dtype, dataId}) { + typedArrayFromHeap({ shape, dtype, dataId }) { const buffer2 = this.wasm.HEAPU8.buffer; - const {memoryOffset} = this.dataIdMap.get(dataId); + const { memoryOffset } = this.dataIdMap.get(dataId); const size = util_exports.sizeFromShape(shape); switch (dtype) { case "float32": @@ -57173,7 +57173,7 @@ var BackendWasm = class extends KernelBackend { }; function createInstantiateWasmFunc(path) { return (imports, callback) => { - util_exports.fetch(path, {credentials: "same-origin"}).then((response) => { + util_exports.fetch(path, { credentials: "same-origin" }).then((response) => { if (!response["ok"]) { imports.env.a(`failed to load wasm binary file at '${path}'`); } @@ -57213,7 +57213,7 @@ async function init() { factoryConfig.locateFile = (path, prefix) => { if (path.endsWith(".worker.js")) { const response = wasmWorkerContents; - const blob = new Blob([response], {type: "application/javascript"}); + const blob = new Blob([response], { type: "application/javascript" }); return URL.createObjectURL(blob); } if (path.endsWith(".wasm")) { @@ -57234,11 +57234,11 @@ async function init() { } initAborted = true; const rejectMsg = "Make sure the server can serve the `.wasm` file relative to the bundled js file. 
For more details see https://github.com/tensorflow/tfjs/blob/master/tfjs-backend-wasm/README.md#using-bundlers"; - reject({message: rejectMsg}); + reject({ message: rejectMsg }); }; let wasm; if (threadsSupported && simdSupported && wasmPath == null) { - factoryConfig.mainScriptUrlOrBlob = new Blob([`var WasmBackendModuleThreadedSimd = ` + import_tfjs_backend_wasm_threaded_simd.default.toString()], {type: "text/javascript"}); + factoryConfig.mainScriptUrlOrBlob = new Blob([`var WasmBackendModuleThreadedSimd = ` + import_tfjs_backend_wasm_threaded_simd.default.toString()], { type: "text/javascript" }); wasm = (0, import_tfjs_backend_wasm_threaded_simd.default)(factoryConfig); } else { wasm = (0, import_tfjs_backend_wasm.default)(factoryConfig); @@ -57257,7 +57257,7 @@ async function init() { disposeData: module2.cwrap("dispose_data", voidReturnType, ["number"]), dispose: module2.cwrap("dispose", voidReturnType, []) }; - resolve({wasm: module2}); + resolve({ wasm: module2 }); }); }); } @@ -57309,7 +57309,7 @@ function setWasmPaths(prefixOrFileMap, usePlatformFetch = false) { var version9 = "3.6.0"; var WASM_PRIORITY = 2; registerBackend("wasm", async () => { - const {wasm} = await init(); + const { wasm } = await init(); return new BackendWasm(wasm); }, WASM_PRIORITY); @@ -57332,7 +57332,7 @@ __export(draw_exports, { // src/draw/drawContour.ts function drawContour(ctx, points, isClosed = false) { ctx.beginPath(); - points.slice(1).forEach(({x, y}, prevIdx) => { + points.slice(1).forEach(({ x, y }, prevIdx) => { const from = points[prevIdx]; ctx.moveTo(from.x, from.y); ctx.lineTo(x, y); @@ -57372,7 +57372,7 @@ __export(utils_exports, { var Dimensions = class { constructor(width, height) { if (!isValidNumber(width) || !isValidNumber(height)) { - throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width, height})}`); + throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({ width, height })}`); } this._width = width; this._height = height; @@ -57417,7 +57417,7 @@ function round5(num, prec = 2) { function isDimensions(obj) { return obj && obj.width && obj.height; } -function computeReshapedDimensions({width, height}, inputSize) { +function computeReshapedDimensions({ width, height }, inputSize) { const scale3 = inputSize / Math.max(height, width); return new Dimensions(Math.round(width * scale3), Math.round(height * scale3)); } @@ -57574,7 +57574,7 @@ var Box = class { y -= diff / 2; height += diff; } - return new Box({x, y, width, height}); + return new Box({ x, y, width, height }); } rescale(s) { const scaleX = isDimensions(s) ? 
s.width : s; @@ -57601,7 +57601,7 @@ var Box = class { }); } clipAtImageBorders(imgWidth, imgHeight) { - const {x, y, right, bottom} = this; + const { x, y, right, bottom } = this; const clippedX = Math.max(x, 0); const clippedY = Math.max(y, 0); const newWidth = right - clippedX; @@ -57616,7 +57616,7 @@ var Box = class { }).floor(); } shift(sx, sy) { - const {width, height} = this; + const { width, height } = this; const x = this.x + sx; const y = this.y + sy; return new Box({ @@ -57722,7 +57722,7 @@ var ObjectDetection = class { return new Box(this._box).rescale(this.imageDims.reverse()); } forSize(width, height) { - return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, {width, height}); + return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, { width, height }); } }; @@ -57732,7 +57732,7 @@ var FaceDetection = class extends ObjectDetection { super(score, score, "", relativeBox, imageDims); } forSize(width, height) { - const {score, relativeBox, imageDims} = super.forSize(width, height); + const { score, relativeBox, imageDims } = super.forSize(width, height); return new FaceDetection(score, relativeBox, imageDims); } }; @@ -57758,7 +57758,7 @@ function minBbox(pts) { // src/ops/nonMaxSuppression.ts function nonMaxSuppression2(boxes, scores, iouThreshold, isIOU = true) { - let indicesSortedByScore = scores.map((score, boxIndex) => ({score, boxIndex})).sort((c1, c2) => c1.score - c2.score).map((c) => c.boxIndex); + let indicesSortedByScore = scores.map((score, boxIndex) => ({ score, boxIndex })).sort((c1, c2) => c1.score - c2.score).map((c) => c.boxIndex); const pick = []; while (indicesSortedByScore.length > 0) { const curr = indicesSortedByScore.pop(); @@ -57853,7 +57853,7 @@ var relY = 0.43; var relScale = 0.45; var FaceLandmarks = class { constructor(relativeFaceLandmarkPositions, imgDims, shift = new Point(0, 0)) { - const {width, height} = imgDims; + const { width, height } = imgDims; this._imgDims = new Dimensions(width, height); this._shift = shift; this._positions = relativeFaceLandmarkPositions.map((pt) => pt.mul(new Point(width, height)).add(shift)); @@ -57874,7 +57874,7 @@ var FaceLandmarks = class { return this._positions.map((pt) => pt.sub(this._shift).div(new Point(this.imageWidth, this.imageHeight))); } forSize(width, height) { - return new this.constructor(this.relativePositions, {width, height}); + return new this.constructor(this.relativePositions, { width, height }); } shiftBy(x, y) { return new this.constructor(this.relativePositions, this._imgDims, new Point(x, y)); @@ -57887,7 +57887,7 @@ var FaceLandmarks = class { const box = detection instanceof FaceDetection ? 
detection.box.floor() : new Box(detection); return this.shiftBy(box.x, box.y).align(null, options); } - const {useDlibAlignment, minBoxPadding} = {useDlibAlignment: false, minBoxPadding: 0.2, ...options}; + const { useDlibAlignment, minBoxPadding } = { useDlibAlignment: false, minBoxPadding: 0.2, ...options }; if (useDlibAlignment) { return this.alignDlib(); } @@ -58047,8 +58047,8 @@ function isWithFaceDetection(obj) { return obj.detection instanceof FaceDetection; } function extendWithFaceDetection(sourceObj, detection) { - const extension = {detection}; - return {...sourceObj, ...extension}; + const extension = { detection }; + return { ...sourceObj, ...extension }; } // src/env/createBrowserEnv.ts @@ -58067,6 +58067,7 @@ function createBrowserEnv() { Video: HTMLVideoElement, createCanvasElement: () => document.createElement("canvas"), createImageElement: () => document.createElement("img"), + createVideoElement: () => document.createElement("video"), fetch: fetch3, readFile }; @@ -58096,6 +58097,7 @@ function createFileSystem(fs) { function createNodejsEnv() { const Canvas = global["Canvas"] || global.HTMLCanvasElement; const Image = global.Image || global.HTMLImageElement; + const Video = global["Video"] || global.HTMLVideoElement; const createCanvasElement = () => { if (Canvas) return new Canvas(); @@ -58106,6 +58108,11 @@ function createNodejsEnv() { return new Image(); throw new Error("createImageElement - missing Image implementation for nodejs environment"); }; + const createVideoElement = () => { + if (Video) + return new Video(); + throw new Error("createVideoElement - missing Video implementation for nodejs environment"); + }; const fetch3 = global.fetch; const fileSystem = createFileSystem(); return { @@ -58121,6 +58128,7 @@ function createNodejsEnv() { }, createCanvasElement, createImageElement, + createVideoElement, fetch: fetch3, ...fileSystem }; @@ -58161,7 +58169,7 @@ function monkeyPatch(env3) { if (!environment) { throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()"); } - const {Canvas = environment.Canvas, Image = environment.Image} = env3; + const { Canvas = environment.Canvas, Image = environment.Image } = env3; environment.Canvas = Canvas; environment.Image = Image; environment.createCanvasElement = env3.createCanvasElement || (() => new Canvas()); @@ -58194,7 +58202,7 @@ function resolveInput(arg) { // src/dom/getContext2dOrThrow.ts function getContext2dOrThrow(canvasArg) { - const {Canvas, CanvasRenderingContext2D: CanvasRenderingContext2D2} = env2.getEnv(); + const { Canvas, CanvasRenderingContext2D: CanvasRenderingContext2D2 } = env2.getEnv(); if (canvasArg instanceof CanvasRenderingContext2D2) { return canvasArg; } @@ -58242,15 +58250,15 @@ var DrawTextField = class { this.options = new DrawTextFieldOptions(options); } measureWidth(ctx) { - const {padding} = this.options; + const { padding } = this.options; return this.text.map((l) => ctx.measureText(l).width).reduce((w0, w1) => w0 < w1 ? 
w1 : w0, 0) + 2 * padding; } measureHeight() { - const {fontSize, padding} = this.options; + const { fontSize, padding } = this.options; return this.text.length * fontSize + 2 * padding; } getUpperLeft(ctx, canvasDims) { - const {anchorPosition} = this.options; + const { anchorPosition } = this.options; const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT; const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT; const textFieldWidth = this.measureWidth(ctx); @@ -58258,12 +58266,12 @@ var DrawTextField = class { const x = isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x; const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y; if (canvasDims) { - const {width, height} = canvasDims; + const { width, height } = canvasDims; const newX = Math.max(Math.min(x, width - textFieldWidth), 0); const newY = Math.max(Math.min(y, height - textFieldHeight), 0); - return {x: newX, y: newY}; + return { x: newX, y: newY }; } - return {x, y}; + return { x, y }; } draw(canvasArg) { const canvas = resolveInput(canvasArg); @@ -58306,7 +58314,7 @@ var DrawBoxOptions = class { anchorPosition: AnchorPosition.BOTTOM_LEFT, backgroundColor: this.boxColor }; - this.drawLabelOptions = new DrawTextFieldOptions({...defaultDrawLabelOptions, ...drawLabelOptions}); + this.drawLabelOptions = new DrawTextFieldOptions({ ...defaultDrawLabelOptions, ...drawLabelOptions }); } }; var DrawBox = class { @@ -58316,7 +58324,7 @@ var DrawBox = class { } draw(canvasArg) { const ctx = getContext2dOrThrow(canvasArg); - const {boxColor, lineWidth} = this.options; + const { boxColor, lineWidth } = this.options; const { x, y, @@ -58326,9 +58334,9 @@ var DrawBox = class { ctx.strokeStyle = boxColor; ctx.lineWidth = lineWidth; ctx.strokeRect(x, y, width, height); - const {label} = this.options; + const { label } = this.options; if (label) { - new DrawTextField([label], {x: x - lineWidth / 2, y}, this.options.drawLabelOptions).draw(canvasArg); + new DrawTextField([label], { x: x - lineWidth / 2, y }, this.options.drawLabelOptions).draw(canvasArg); } } }; @@ -58340,13 +58348,13 @@ function drawDetections(canvasArg, detections) { const score = det instanceof FaceDetection ? det.score : isWithFaceDetection(det) ? det.detection.score : void 0; const box = det instanceof FaceDetection ? det.box : isWithFaceDetection(det) ? det.detection.box : new Box(det); const label = score ? 
`${round5(score)}` : void 0; - new DrawBox(box, {label}).draw(canvasArg); + new DrawBox(box, { label }).draw(canvasArg); }); } // src/dom/isMediaLoaded.ts function isMediaLoaded(media) { - const {Image, Video} = env2.getEnv(); + const { Image, Video } = env2.getEnv(); return media instanceof Image && media.complete || media instanceof Video && media.readyState >= 3; } @@ -58395,7 +58403,7 @@ function bufferToImage(buf) { // src/dom/getMediaDimensions.ts function getMediaDimensions(input2) { - const {Image, Video} = env2.getEnv(); + const { Image, Video } = env2.getEnv(); if (input2 instanceof Image) { return new Dimensions(input2.naturalWidth, input2.naturalHeight); } @@ -58406,20 +58414,20 @@ function getMediaDimensions(input2) { } // src/dom/createCanvas.ts -function createCanvas2({width, height}) { - const {createCanvasElement} = env2.getEnv(); +function createCanvas2({ width, height }) { + const { createCanvasElement } = env2.getEnv(); const canvas = createCanvasElement(); canvas.width = width; canvas.height = height; return canvas; } function createCanvasFromMedia(media, dims) { - const {ImageData: ImageData2} = env2.getEnv(); + const { ImageData: ImageData2 } = env2.getEnv(); if (!(media instanceof ImageData2) && !isMediaLoaded(media)) { throw new Error("createCanvasFromMedia - media has not finished loading yet"); } - const {width, height} = dims || getMediaDimensions(media); - const canvas = createCanvas2({width, height}); + const { width, height } = dims || getMediaDimensions(media); + const canvas = createCanvas2({ width, height }); if (media instanceof ImageData2) { getContext2dOrThrow(canvas).putImageData(media, 0, 0); } else { @@ -58440,23 +58448,23 @@ async function imageTensorToCanvas(imgTensor, canvas) { // src/dom/isMediaElement.ts function isMediaElement(input2) { - const {Image, Canvas, Video} = env2.getEnv(); + const { Image, Canvas, Video } = env2.getEnv(); return input2 instanceof Image || input2 instanceof Canvas || input2 instanceof Video; } // src/dom/imageToSquare.ts function imageToSquare(input2, inputSize, centerImage = false) { - const {Image, Canvas} = env2.getEnv(); + const { Image, Canvas } = env2.getEnv(); if (!(input2 instanceof Image || input2 instanceof Canvas)) { throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement"); } if (inputSize <= 0) - return createCanvas2({width: 1, height: 1}); + return createCanvas2({ width: 1, height: 1 }); const dims = getMediaDimensions(input2); const scale3 = inputSize / Math.max(dims.height, dims.width); const width = scale3 * dims.width; const height = scale3 * dims.height; - const targetCanvas = createCanvas2({width: inputSize, height: inputSize}); + const targetCanvas = createCanvas2({ width: inputSize, height: inputSize }); const inputCanvas = input2 instanceof Canvas ? input2 : createCanvasFromMedia(input2); const offset = Math.abs(width - height) / 2; const dx = centerImage && width < height ? 
offset : 0; @@ -58537,7 +58545,7 @@ var NetInput = class { } const width = this.getInputWidth(batchIdx); const height = this.getInputHeight(batchIdx); - return computeReshapedDimensions({width, height}, this.inputSize); + return computeReshapedDimensions({ width, height }, this.inputSize); } toBatchTensor(inputSize, isCenterInputs = true) { this._inputSize = inputSize; @@ -58590,7 +58598,7 @@ async function toNetInput(inputs) { // src/dom/extractFaces.ts async function extractFaces(input2, detections) { - const {Canvas} = env2.getEnv(); + const { Canvas } = env2.getEnv(); let canvas = input2; if (!(input2 instanceof Canvas)) { const netInput = await toNetInput(input2); @@ -58601,8 +58609,8 @@ async function extractFaces(input2, detections) { } const ctx = getContext2dOrThrow(canvas); const boxes = detections.map((det) => det instanceof FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det).map((box) => box.clipAtImageBorders(canvas.width, canvas.height)); - return boxes.map(({x, y, width, height}) => { - const faceImg = createCanvas2({width, height}); + return boxes.map(({ x, y, width, height }) => { + const faceImg = createCanvas2({ width, height }); if (width > 0 && height > 0) getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0); return faceImg; @@ -58632,7 +58640,7 @@ async function extractFaceTensors(imageTensor, detections) { // src/dom/fetchOrThrow.ts async function fetchOrThrow(url, init2) { - const {fetch: fetch3} = env2.getEnv(); + const { fetch: fetch3 } = env2.getEnv(); const res = await fetch3(url, init2); if (!(res.status < 400)) { throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`); @@ -58660,6 +58668,31 @@ async function fetchNetWeights(uri) { return new Float32Array(await (await fetchOrThrow(uri)).arrayBuffer()); } +// src/dom/bufferToVideo.ts +function bufferToVideo(buf) { + return new Promise((resolve, reject) => { + if (!(buf instanceof Blob)) + reject(new Error("bufferToVideo - expected buf to be of type: Blob")); + const video = env2.getEnv().createVideoElement(); + video.oncanplay = () => resolve(video); + video.onerror = reject; + video.playsInline = true; + video.autoplay = true; + video.muted = true; + video.src = URL.createObjectURL(buf); + }); +} + +// src/dom/fetchVideo.ts +async function fetchVideo(uri) { + const res = await fetchOrThrow(uri); + const blob = await res.blob(); + if (!blob.type.startsWith("video/")) { + throw new Error(`fetchVideo - expected blob type to be of type video/*, instead have: ${blob.type}, for url: ${res.url}`); + } + return bufferToVideo(blob); +} + // src/common/getModelUris.ts function getModelUris(uri, defaultModelName) { const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`; @@ -58689,17 +58722,17 @@ function getModelUris(uri, defaultModelName) { // src/dom/loadWeightMap.ts async function loadWeightMap(uri, defaultModelName) { - const {manifestUri, modelBaseUri} = getModelUris(uri, defaultModelName); + const { manifestUri, modelBaseUri } = getModelUris(uri, defaultModelName); const manifest = await fetchJson(manifestUri); return io_exports.loadWeights(manifest, modelBaseUri); } // src/dom/matchDimensions.ts function matchDimensions(input2, reference, useMediaDimensions = false) { - const {width, height} = useMediaDimensions ? getMediaDimensions(reference) : reference; + const { width, height } = useMediaDimensions ? 
getMediaDimensions(reference) : reference; input2.width = width; input2.height = height; - return {width, height}; + return { width, height }; } // src/NeuralNetwork.ts @@ -58719,16 +58752,16 @@ var NeuralNetwork = class { return !!this.params; } getParamFromPath(paramPath) { - const {obj, objProp} = this.traversePropertyPath(paramPath); + const { obj, objProp } = this.traversePropertyPath(paramPath); return obj[objProp]; } reassignParamFromPath(paramPath, tensor2) { - const {obj, objProp} = this.traversePropertyPath(paramPath); + const { obj, objProp } = this.traversePropertyPath(paramPath); obj[objProp].dispose(); obj[objProp] = tensor2; } getParamList() { - return this._paramMappings.map(({paramPath}) => ({ + return this._paramMappings.map(({ paramPath }) => ({ path: paramPath, tensor: this.getParamFromPath(paramPath) })); @@ -58740,12 +58773,12 @@ var NeuralNetwork = class { return this.getParamList().filter((param) => !(param.tensor instanceof Variable)); } variable() { - this.getFrozenParams().forEach(({path, tensor: tensor2}) => { + this.getFrozenParams().forEach(({ path, tensor: tensor2 }) => { this.reassignParamFromPath(path, tensor2.variable()); }); } freeze() { - this.getTrainableParams().forEach(({path, tensor: variable2}) => { + this.getTrainableParams().forEach(({ path, tensor: variable2 }) => { const tensor2 = tensor(variable2.dataSync()); variable2.dispose(); this.reassignParamFromPath(path, tensor2); @@ -58761,7 +58794,7 @@ var NeuralNetwork = class { this._params = void 0; } serializeParams() { - return new Float32Array(this.getParamList().map(({tensor: tensor2}) => Array.from(tensor2.dataSync())).reduce((flat, arr) => flat.concat(arr))); + return new Float32Array(this.getParamList().map(({ tensor: tensor2 }) => Array.from(tensor2.dataSync())).reduce((flat, arr) => flat.concat(arr))); } async load(weightsOrUrl) { if (weightsOrUrl instanceof Float32Array) { @@ -58781,8 +58814,8 @@ var NeuralNetwork = class { if (filePath && typeof filePath !== "string") { throw new Error(`${this._name}.loadFromDisk - expected model file path`); } - const {readFile} = env2.getEnv(); - const {manifestUri, modelBaseUri} = getModelUris(filePath, this.getDefaultModelName()); + const { readFile } = env2.getEnv(); + const { manifestUri, modelBaseUri } = getModelUris(filePath, this.getDefaultModelName()); const fetchWeightsFromDisk = (filePaths) => Promise.all(filePaths.map((fp) => readFile(fp).then((buf) => buf.buffer))); const loadWeights2 = io_exports.weightsLoaderFactory(fetchWeightsFromDisk); const manifest = JSON.parse((await readFile(manifestUri)).toString()); @@ -58790,12 +58823,12 @@ var NeuralNetwork = class { this.loadFromWeightMap(weightMap); } loadFromWeightMap(weightMap) { - const {paramMappings, params} = this.extractParamsFromWeightMap(weightMap); + const { paramMappings, params } = this.extractParamsFromWeightMap(weightMap); this._paramMappings = paramMappings; this._params = params; } extractWeights(weights) { - const {paramMappings, params} = this.extractParams(weights); + const { paramMappings, params } = this.extractParams(weights); this._paramMappings = paramMappings; this._params = params; } @@ -58807,13 +58840,13 @@ var NeuralNetwork = class { if (!res.nextObj.hasOwnProperty(objProp2)) { throw new Error(`traversePropertyPath - object does not have property ${objProp2}, for path ${paramPath}`); } - return {obj: res.nextObj, objProp: objProp2, nextObj: res.nextObj[objProp2]}; - }, {nextObj: this.params}); - const {obj, objProp} = result; + return { obj: res.nextObj, objProp: 
objProp2, nextObj: res.nextObj[objProp2] }; + }, { nextObj: this.params }); + const { obj, objProp } = result; if (!obj || !objProp || !(obj[objProp] instanceof Tensor)) { throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`); } - return {obj, objProp}; + return { obj, objProp }; } }; @@ -58870,8 +58903,8 @@ function extractConvParamsFactory(extractWeights, paramMappings) { return (channelsIn, channelsOut, filterSize, mappedPrefix) => { const filters = tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]); const bias = tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/bias` }); + return { filters, bias }; }; } @@ -58880,7 +58913,7 @@ function extractFCParamsFactory(extractWeights, paramMappings) { return (channelsIn, channelsOut, mappedPrefix) => { const fc_weights = tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]); const fc_bias = tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/bias`}); + paramMappings.push({ paramPath: `${mappedPrefix}/weights` }, { paramPath: `${mappedPrefix}/bias` }); return { weights: fc_weights, bias: fc_bias @@ -58903,7 +58936,7 @@ function extractSeparableConvParamsFactory(extractWeights, paramMappings) { const depthwise_filter = tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]); const pointwise_filter = tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]); const bias = tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/depthwise_filter`}, {paramPath: `${mappedPrefix}/pointwise_filter`}, {paramPath: `${mappedPrefix}/bias`}); + paramMappings.push({ paramPath: `${mappedPrefix}/depthwise_filter` }, { paramPath: `${mappedPrefix}/pointwise_filter` }, { paramPath: `${mappedPrefix}/bias` }); return new SeparableConvParams(depthwise_filter, pointwise_filter, bias); }; } @@ -58923,7 +58956,7 @@ function extractWeightEntryFactory(weightMap, paramMappings) { if (!isTensor(tensor2, paramRank)) { throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor2}`); } - paramMappings.push({originalPath, paramPath: mappedPath || originalPath}); + paramMappings.push({ originalPath, paramPath: mappedPath || originalPath }); return tensor2; }; } @@ -58953,10 +58986,10 @@ function extractorsFactory(extractWeights, paramMappings) { const conv0 = isFirstLayer ? 
extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`) : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`); const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`); const conv22 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`); - return {conv0, conv1, conv2: conv22}; + return { conv0, conv1, conv2: conv22 }; } function extractDenseBlock4Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer = false) { - const {conv0, conv1, conv2: conv22} = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer); + const { conv0, conv1, conv2: conv22 } = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer); const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`); return { conv0, @@ -59004,7 +59037,7 @@ function loadConvParamsFactory(extractWeightEntry) { return (prefix) => { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {filters, bias}; + return { filters, bias }; }; } @@ -59017,7 +59050,7 @@ function loadParamsFactory(weightMap, paramMappings) { const conv0 = isFirstLayer ? extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); const conv1 = extractSeparableConvParams(`${prefix}/conv1`); const conv22 = extractSeparableConvParams(`${prefix}/conv2`); - return {conv0, conv1, conv2: conv22}; + return { conv0, conv1, conv2: conv22 }; } function extractDenseBlock4Params(prefix, isFirstLayer = false) { const conv0 = isFirstLayer ? extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); @@ -59050,7 +59083,7 @@ function extractParamsFromWeightMap(weightMap) { dense3: extractDenseBlock4Params("dense3") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceFeatureExtractor/FaceFeatureExtractor.ts @@ -59059,7 +59092,7 @@ var FaceFeatureExtractor = class extends NeuralNetwork { super("FaceFeatureExtractor"); } forwardInput(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("FaceFeatureExtractor - load model before inference"); } @@ -59108,7 +59141,7 @@ function extractParams2(weights, channelsIn, channelsOut) { } return { paramMappings, - params: {fc} + params: { fc } }; } @@ -59119,13 +59152,13 @@ function extractParamsFromWeightMap2(weightMap) { function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {weights, bias}; + return { weights, bias }; } const params = { fc: extractFcParams("fc") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceProcessor/util.ts @@ -59136,7 +59169,7 @@ function seperateWeightMaps(weightMap) { const map = key.startsWith("fc") ? 
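// weight entries prefixed with "fc" belong to the classifier head; everything
// else goes to the shared feature extractor, so the two can be loaded and
// disposed independently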
classifierMap : featureExtractorMap; map[key] = weightMap[key]; }); - return {featureExtractorMap, classifierMap}; + return { featureExtractorMap, classifierMap }; } // src/faceProcessor/FaceProcessor.ts @@ -59149,7 +59182,7 @@ var FaceProcessor = class extends NeuralNetwork { return this._faceFeatureExtractor; } runNet(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } @@ -59163,7 +59196,7 @@ var FaceProcessor = class extends NeuralNetwork { super.dispose(throwOnRedispose); } loadClassifierParams(weights) { - const {params, paramMappings} = this.extractClassifierParams(weights); + const { params, paramMappings } = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } @@ -59171,7 +59204,7 @@ var FaceProcessor = class extends NeuralNetwork { return extractParams2(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut()); } extractParamsFromWeightMap(weightMap) { - const {featureExtractorMap, classifierMap} = seperateWeightMaps(weightMap); + const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeightMap2(classifierMap); } @@ -59198,7 +59231,7 @@ var FaceExpressions = class { }); } asSortedArray() { - return FACE_EXPRESSION_LABELS.map((expression) => ({expression, probability: this[expression]})).sort((e0, e1) => e1.probability - e0.probability); + return FACE_EXPRESSION_LABELS.map((expression) => ({ expression, probability: this[expression] })).sort((e0, e1) => e1.probability - e0.probability); } }; @@ -59241,8 +59274,8 @@ function isWithFaceExpressions(obj) { return obj.expressions instanceof FaceExpressions; } function extendWithFaceExpressions(sourceObj, expressions) { - const extension = {expressions}; - return {...sourceObj, ...extension}; + const extension = { expressions }; + return { ...sourceObj, ...extension }; } // src/draw/drawFaceExpressions.ts @@ -59268,7 +59301,7 @@ function isWithFaceLandmarks(obj) { function calculateFaceAngle(mesh) { const radians = (a12, a22, b1, b2) => Math.atan2(b2 - a22, b1 - a12) % Math.PI; const degrees = (theta) => theta * 180 / Math.PI; - const angle = {roll: void 0, pitch: void 0, yaw: void 0}; + const angle = { roll: void 0, pitch: void 0, yaw: void 0 }; if (!mesh || !mesh._positions || mesh._positions.length !== 68) return angle; const pt = mesh._positions; @@ -59280,10 +59313,10 @@ function calculateFaceAngle(mesh) { return angle; } function extendWithFaceLandmarks(sourceObj, unshiftedLandmarks) { - const {box: shift} = sourceObj.detection; + const { box: shift } = sourceObj.detection; const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y); const rect = landmarks.align(); - const {imageDims} = sourceObj.detection; + const { imageDims } = sourceObj.detection; const alignedRect = new FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims); const angle = calculateFaceAngle(unshiftedLandmarks); const extension = { @@ -59292,7 +59325,7 @@ function extendWithFaceLandmarks(sourceObj, unshiftedLandmarks) { alignedRect, angle }; - return {...sourceObj, ...extension}; + return { ...sourceObj, ...extension }; } // src/draw/DrawFaceLandmarks.ts @@ -59364,7 +59397,7 @@ function drawFaceLandmarks(canvasArg, faceLandmarks) { } // package.json -var version10 = "1.2.4"; +var version10 = "1.2.5"; // src/xception/extractParams.ts function 
extractorsFactory2(extractWeights, paramMappings) { @@ -59374,13 +59407,13 @@ function extractorsFactory2(extractWeights, paramMappings) { const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`); - return {separable_conv0, separable_conv1, expansion_conv}; + return { separable_conv0, separable_conv1, expansion_conv }; } function extractMainBlockParams(channels, mappedPrefix) { const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`); - return {separable_conv0, separable_conv1, separable_conv2}; + return { separable_conv0, separable_conv1, separable_conv2 }; } return { extractConvParams, @@ -59424,7 +59457,7 @@ function extractParams3(weights, numMainBlocks) { } return { paramMappings, - params: {entry_flow, middle_flow, exit_flow} + params: { entry_flow, middle_flow, exit_flow } }; } @@ -59437,13 +59470,13 @@ function loadParamsFactory2(weightMap, paramMappings) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`); - return {separable_conv0, separable_conv1, expansion_conv}; + return { separable_conv0, separable_conv1, expansion_conv }; } function extractMainBlockParams(mappedPrefix) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`); - return {separable_conv0, separable_conv1, separable_conv2}; + return { separable_conv0, separable_conv1, separable_conv2 }; } return { extractConvParams, @@ -59479,7 +59512,7 @@ function extractParamsFromWeightMap3(weightMap, numMainBlocks) { separable_conv: exit_flow_separable_conv }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params: {entry_flow, middle_flow, exit_flow}, paramMappings}; + return { params: { entry_flow, middle_flow, exit_flow }, paramMappings }; } // src/xception/TinyXception.ts @@ -59507,7 +59540,7 @@ var TinyXception = class extends NeuralNetwork { this._numMainBlocks = numMainBlocks; } forwardInput(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("TinyXception - load model before inference"); } @@ -59555,7 +59588,7 @@ function extractParams4(weights) { } return { paramMappings, - params: {fc: {age, gender}} + params: { fc: { age, gender } } }; } @@ -59566,7 +59599,7 @@ function extractParamsFromWeightMap4(weightMap) { function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {weights, bias}; + return { weights, bias }; } const params = { fc: { @@ -59575,7 +59608,7 @@ function extractParamsFromWeightMap4(weightMap) { } }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // 
src/ageGenderNet/types.ts @@ -59595,7 +59628,7 @@ var AgeGenderNet = class extends NeuralNetwork { return this._faceFeatureExtractor; } runNet(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } @@ -59604,13 +59637,13 @@ var AgeGenderNet = class extends NeuralNetwork { const pooled = avgPool(bottleneckFeatures, [7, 7], [2, 2], "valid").as2D(bottleneckFeatures.shape[0], -1); const age = fullyConnectedLayer(pooled, params.fc.age).as1D(); const gender = fullyConnectedLayer(pooled, params.fc.gender); - return {age, gender}; + return { age, gender }; }); } forwardInput(input2) { return tidy(() => { - const {age, gender} = this.runNet(input2); - return {age, gender: softmax(gender)}; + const { age, gender } = this.runNet(input2); + return { age, gender: softmax(gender) }; }); } async forward(input2) { @@ -59625,7 +59658,7 @@ var AgeGenderNet = class extends NeuralNetwork { ageTensor, genderTensor: genders[i] })); - const predictionsByBatch = await Promise.all(ageAndGenderTensors.map(async ({ageTensor, genderTensor}) => { + const predictionsByBatch = await Promise.all(ageAndGenderTensors.map(async ({ ageTensor, genderTensor }) => { const age = ageTensor.dataSync()[0]; const probMale = genderTensor.dataSync()[0]; const isMale = probMale > 0.5; @@ -59633,7 +59666,7 @@ var AgeGenderNet = class extends NeuralNetwork { const genderProbability = isMale ? probMale : 1 - probMale; ageTensor.dispose(); genderTensor.dispose(); - return {age, gender, genderProbability}; + return { age, gender, genderProbability }; })); out.age.dispose(); out.gender.dispose(); @@ -59647,7 +59680,7 @@ var AgeGenderNet = class extends NeuralNetwork { super.dispose(throwOnRedispose); } loadClassifierParams(weights) { - const {params, paramMappings} = this.extractClassifierParams(weights); + const { params, paramMappings } = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } @@ -59655,7 +59688,7 @@ var AgeGenderNet = class extends NeuralNetwork { return extractParams4(weights); } extractParamsFromWeightMap(weightMap) { - const {featureExtractorMap, classifierMap} = seperateWeightMaps(weightMap); + const { featureExtractorMap, classifierMap } = seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeightMap4(classifierMap); } @@ -59671,7 +59704,7 @@ var AgeGenderNet = class extends NeuralNetwork { // src/faceLandmarkNet/FaceLandmark68NetBase.ts var FaceLandmark68NetBase = class extends FaceProcessor { postProcess(output, inputSize, originalDimensions) { - const inputDimensions = originalDimensions.map(({width, height}) => { + const inputDimensions = originalDimensions.map(({ width, height }) => { const scale3 = inputSize / Math.max(height, width); return { width: width * scale3, @@ -59682,7 +59715,7 @@ var FaceLandmark68NetBase = class extends FaceProcessor { return tidy(() => { const createInterleavedTensor = (fillX, fillY) => stack([fill([68], fillX, "float32"), fill([68], fillY, "float32")], 1).as2D(1, 136).as1D(); const getPadding2 = (batchIdx, cond) => { - const {width, height} = inputDimensions[batchIdx]; + const { width, height } = inputDimensions[batchIdx]; return cond(width, height) ? 
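// half the gap between the squared input's sides, i.e. the centering offset
// that was applied when the input was padded to a square; returned here so the
// predicted landmark coordinates can be corrected back to the original frame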
Math.abs(width - height) / 2 : 0; }; const getPaddingX = (batchIdx) => getPadding2(batchIdx, (w, h) => w < h); @@ -59694,7 +59727,7 @@ var FaceLandmark68NetBase = class extends FaceProcessor { forwardInput(input2) { return tidy(() => { const out = this.runNet(input2); - return this.postProcess(out, input2.inputSize, input2.inputDimensions.map(([height, width]) => ({height, width}))); + return this.postProcess(out, input2.inputSize, input2.inputDimensions.map(([height, width]) => ({ height, width }))); }); } async forward(input2) { @@ -59745,7 +59778,7 @@ function extractParamsFromWeightMapTiny(weightMap) { dense2: extractDenseBlock3Params("dense2") }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceFeatureExtractor/extractParamsTiny.ts @@ -59766,7 +59799,7 @@ function extractParamsTiny(weights) { } return { paramMappings, - params: {dense0, dense1, dense2} + params: { dense0, dense1, dense2 } }; } @@ -59776,7 +59809,7 @@ var TinyFaceFeatureExtractor = class extends NeuralNetwork { super("TinyFaceFeatureExtractor"); } forwardInput(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("TinyFaceFeatureExtractor - load model before inference"); } @@ -59829,7 +59862,7 @@ function scale2(x, params) { // src/faceRecognitionNet/convLayer.ts function convLayer2(x, params, strides, withRelu, padding = "same") { - const {filters, bias} = params.conv; + const { filters, bias } = params.conv; let out = conv2d(x, filters, strides, padding); out = add2(out, bias); out = scale2(out, params.scale); @@ -59858,13 +59891,13 @@ function extractorsFactory3(extractWeights, paramMappings) { function extractConvParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const filters = extractFilterValues(numFilterValues, numFilters, filterSize); const bias = tensor1d(extractWeights(numFilters)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/bias` }); + return { filters, bias }; } function extractScaleLayerParams(numWeights, mappedPrefix) { const weights = tensor1d(extractWeights(numWeights)); const biases = tensor1d(extractWeights(numWeights)); - paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/biases`}); + paramMappings.push({ paramPath: `${mappedPrefix}/weights` }, { paramPath: `${mappedPrefix}/biases` }); return { weights, biases @@ -59873,12 +59906,12 @@ function extractorsFactory3(extractWeights, paramMappings) { function extractConvLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const conv3 = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`); const scale3 = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`); - return {conv: conv3, scale: scale3}; + return { conv: conv3, scale: scale3 }; } function extractResidualLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix, isDown = false) { const conv1 = extractConvLayerParams((isDown ? 
0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`); const conv22 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`); - return {conv1, conv2: conv22}; + return { conv1, conv2: conv22 }; } return { extractConvLayerParams, @@ -59911,7 +59944,7 @@ function extractParams5(weights) { const conv256_2 = extractResidualLayerParams(589824, 256, 3, "conv256_2"); const conv256_down_out = extractResidualLayerParams(589824, 256, 3, "conv256_down_out"); const fc = tidy(() => transpose(tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0])); - paramMappings.push({paramPath: "fc"}); + paramMappings.push({ paramPath: "fc" }); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } @@ -59933,7 +59966,7 @@ function extractParams5(weights) { conv256_down_out, fc }; - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceRecognitionNet/extractParamsFromWeightMap.ts @@ -59942,13 +59975,13 @@ function extractorsFactory4(weightMap, paramMappings) { function extractScaleLayerParams(prefix) { const weights = extractWeightEntry(`${prefix}/scale/weights`, 1); const biases = extractWeightEntry(`${prefix}/scale/biases`, 1); - return {weights, biases}; + return { weights, biases }; } function extractConvLayerParams(prefix) { const filters = extractWeightEntry(`${prefix}/conv/filters`, 4); const bias = extractWeightEntry(`${prefix}/conv/bias`, 1); const scale3 = extractScaleLayerParams(prefix); - return {conv: {filters, bias}, scale: scale3}; + return { conv: { filters, bias }, scale: scale3 }; } function extractResidualLayerParams(prefix) { return { @@ -59982,8 +60015,8 @@ function extractParamsFromWeightMap5(weightMap) { const conv256_1 = extractResidualLayerParams("conv256_1"); const conv256_2 = extractResidualLayerParams("conv256_2"); const conv256_down_out = extractResidualLayerParams("conv256_down_out"); - const {fc} = weightMap; - paramMappings.push({originalPath: "fc", paramPath: "fc"}); + const { fc } = weightMap; + paramMappings.push({ originalPath: "fc", paramPath: "fc" }); if (!isTensor2D(fc)) { throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${fc}`); } @@ -60006,7 +60039,7 @@ function extractParamsFromWeightMap5(weightMap) { fc }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/faceRecognitionNet/residualLayer.ts @@ -60046,7 +60079,7 @@ var FaceRecognitionNet = class extends NeuralNetwork { super("FaceRecognitionNet"); } forwardInput(input2) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("FaceRecognitionNet - load model before inference"); } @@ -60108,8 +60141,8 @@ function createFaceRecognitionNet(weights) { // src/factories/WithFaceDescriptor.ts function extendWithFaceDescriptor(sourceObj, descriptor) { - const extension = {descriptor}; - return {...sourceObj, ...extension}; + const extension = { descriptor }; + return { ...sourceObj, ...extension }; } // src/factories/WithAge.ts @@ -60117,8 +60150,8 @@ function isWithAge(obj) { return typeof obj.age === "number"; } function extendWithAge(sourceObj, age) { - const extension = {age}; - return {...sourceObj, ...extension}; + const extension = { age }; + return { ...sourceObj, ...extension }; } // src/factories/WithGender.ts @@ -60126,8 +60159,8 @@ function isWithGender(obj) { return (obj.gender === Gender.MALE || obj.gender === Gender.FEMALE) && 
isValidProbablitiy(obj.genderProbability); } function extendWithGender(sourceObj, gender, genderProbability) { - const extension = {gender, genderProbability}; - return {...sourceObj, ...extension}; + const extension = { gender, genderProbability }; + return { ...sourceObj, ...extension }; } // src/ssdMobilenetv1/extractParams.ts @@ -60138,7 +60171,7 @@ function extractorsFactory5(extractWeights, paramMappings) { const batch_norm_offset = tensor1d(extractWeights(numChannels)); const batch_norm_mean = tensor1d(extractWeights(numChannels)); const batch_norm_variance = tensor1d(extractWeights(numChannels)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/batch_norm_scale`}, {paramPath: `${mappedPrefix}/batch_norm_offset`}, {paramPath: `${mappedPrefix}/batch_norm_mean`}, {paramPath: `${mappedPrefix}/batch_norm_variance`}); + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/batch_norm_scale` }, { paramPath: `${mappedPrefix}/batch_norm_offset` }, { paramPath: `${mappedPrefix}/batch_norm_mean` }, { paramPath: `${mappedPrefix}/batch_norm_variance` }); return { filters, batch_norm_scale, @@ -60150,8 +60183,8 @@ function extractorsFactory5(extractWeights, paramMappings) { function extractConvParams(channelsIn, channelsOut, filterSize, mappedPrefix, isPointwiseConv) { const filters = tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]); const bias = tensor1d(extractWeights(channelsOut)); - paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/${isPointwiseConv ? "batch_norm_offset" : "bias"}`}); - return {filters, bias}; + paramMappings.push({ paramPath: `${mappedPrefix}/filters` }, { paramPath: `${mappedPrefix}/${isPointwiseConv ? 
"batch_norm_offset" : "bias"}` }); + return { filters, bias }; } function extractPointwiseConvParams(channelsIn, channelsOut, filterSize, mappedPrefix) { const { @@ -60166,7 +60199,7 @@ function extractorsFactory5(extractWeights, paramMappings) { function extractConvPairParams(channelsIn, channelsOut, mappedPrefix) { const depthwise_conv = extractDepthwiseConvParams(channelsIn, `${mappedPrefix}/depthwise_conv`); const pointwise_conv = extractPointwiseConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/pointwise_conv`); - return {depthwise_conv, pointwise_conv}; + return { depthwise_conv, pointwise_conv }; } function extractMobilenetV1Params() { const conv_0 = extractPointwiseConvParams(3, 32, 3, "mobilenetv1/conv_0"); @@ -60283,7 +60316,7 @@ function extractParams6(weights) { const output_layer = { extra_dim }; - paramMappings.push({paramPath: "output_layer/extra_dim"}); + paramMappings.push({ paramPath: "output_layer/extra_dim" }); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } @@ -60303,7 +60336,7 @@ function extractorsFactory6(weightMap, paramMappings) { function extractPointwiseConvParams(prefix, idx, mappedPrefix) { const filters = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/weights`, 4, `${mappedPrefix}/filters`); const batch_norm_offset = extractWeightEntry(`${prefix}/Conv2d_${idx}_pointwise/convolution_bn_offset`, 1, `${mappedPrefix}/batch_norm_offset`); - return {filters, batch_norm_offset}; + return { filters, batch_norm_offset }; } function extractConvPairParams(idx) { const mappedPrefix = `mobilenetv1/conv_${idx}`; @@ -60347,12 +60380,12 @@ function extractorsFactory6(weightMap, paramMappings) { function extractConvParams(prefix, mappedPrefix) { const filters = extractWeightEntry(`${prefix}/weights`, 4, `${mappedPrefix}/filters`); const bias = extractWeightEntry(`${prefix}/biases`, 1, `${mappedPrefix}/bias`); - return {filters, bias}; + return { filters, bias }; } function extractBoxPredictorParams(idx) { const box_encoding_predictor = extractConvParams(`Prediction/BoxPredictor_${idx}/BoxEncodingPredictor`, `prediction_layer/box_predictor_${idx}/box_encoding_predictor`); const class_predictor = extractConvParams(`Prediction/BoxPredictor_${idx}/ClassPredictor`, `prediction_layer/box_predictor_${idx}/class_predictor`); - return {box_encoding_predictor, class_predictor}; + return { box_encoding_predictor, class_predictor }; } function extractPredictionLayerParams() { return { @@ -60384,7 +60417,7 @@ function extractParamsFromWeightMap6(weightMap) { extractPredictionLayerParams } = extractorsFactory6(weightMap, paramMappings); const extra_dim = weightMap["Output/extra_dim"]; - paramMappings.push({originalPath: "Output/extra_dim", paramPath: "output_layer/extra_dim"}); + paramMappings.push({ originalPath: "Output/extra_dim", paramPath: "output_layer/extra_dim" }); if (!isTensor3D(extra_dim)) { throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${extra_dim}`); } @@ -60396,7 +60429,7 @@ function extractParamsFromWeightMap6(weightMap) { } }; disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/ssdMobilenetv1/pointwiseConvLayer.ts @@ -60482,7 +60515,7 @@ function IOU(boxes, i, j) { function nonMaxSuppression3(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) { const numBoxes = boxes.shape[0]; const outputSize = Math.min(maxOutputSize, numBoxes); - const candidates = 
scores.map((score, boxIndex) => ({score, boxIndex})).filter((c) => c.score > scoreThreshold).sort((c1, c2) => c2.score - c1.score); + const candidates = scores.map((score, boxIndex) => ({ score, boxIndex })).filter((c) => c.score > scoreThreshold).sort((c1, c2) => c2.score - c1.score); const suppressFunc = (x) => x <= iouThreshold ? 1 : 0; const selected = []; candidates.forEach((c) => { @@ -60515,10 +60548,10 @@ function getCenterCoordinatesAndSizesLayer(x) { add2(vec[0], div(sizes[0], 2)), add2(vec[1], div(sizes[1], 2)) ]; - return {sizes, centers}; + return { sizes, centers }; } function decodeBoxesLayer(x0, x1) { - const {sizes, centers} = getCenterCoordinatesAndSizesLayer(x0); + const { sizes, centers } = getCenterCoordinatesAndSizesLayer(x0); const vec = unstack(transpose(x1, [1, 0])); const div0_out = div(mul(exp(div(vec[2], 5)), sizes[0]), 2); const add0_out = add2(mul(div(vec[0], 10), sizes[0]), centers[0]); @@ -60541,7 +60574,7 @@ function outputLayer(boxPredictions, classPredictions, params) { scores = reshape(scores, [batchSize, scores.shape[1]]); const boxesByBatch = unstack(boxes); const scoresByBatch = unstack(scores); - return {boxes: boxesByBatch, scores: scoresByBatch}; + return { boxes: boxesByBatch, scores: scoresByBatch }; }); } @@ -60551,7 +60584,7 @@ function boxPredictionLayer(x, params) { const batchSize = x.shape[0]; const boxPredictionEncoding = reshape(convLayer(x, params.box_encoding_predictor), [batchSize, -1, 1, 4]); const classPrediction = reshape(convLayer(x, params.class_predictor), [batchSize, -1, 3]); - return {boxPredictionEncoding, classPrediction}; + return { boxPredictionEncoding, classPrediction }; }); } @@ -60597,7 +60630,7 @@ function predictionLayer(x, conv11, params) { // src/ssdMobilenetv1/SsdMobilenetv1Options.ts var SsdMobilenetv1Options = class { - constructor({minConfidence, maxResults} = {}) { + constructor({ minConfidence, maxResults } = {}) { this._name = "SsdMobilenetv1Options"; this._minConfidence = minConfidence || 0.5; this._maxResults = maxResults || 100; @@ -60622,14 +60655,14 @@ var SsdMobilenetv1 = class extends NeuralNetwork { super("SsdMobilenetv1"); } forwardInput(input2) { - const {params} = this; + const { params } = this; if (!params) throw new Error("SsdMobilenetv1 - load model before inference"); return tidy(() => { const batchTensor = cast(input2.toBatchTensor(512, false), "float32"); const x = sub(div(batchTensor, 127.5), 1); const features = mobileNetV1(x, params.mobilenetv1); - const {boxPredictions, classPredictions} = predictionLayer(features.out, features.conv11, params.prediction_layer); + const { boxPredictions, classPredictions } = predictionLayer(features.out, features.conv11, params.prediction_layer); return outputLayer(boxPredictions, classPredictions, params.output_layer); }); } @@ -60637,9 +60670,9 @@ var SsdMobilenetv1 = class extends NeuralNetwork { return this.forwardInput(await toNetInput(input2)); } async locateFaces(input2, options = {}) { - const {maxResults, minConfidence} = new SsdMobilenetv1Options(options); + const { maxResults, minConfidence } = new SsdMobilenetv1Options(options); const netInput = await toNetInput(input2); - const {boxes: _boxes, scores: _scores} = this.forwardInput(netInput); + const { boxes: _boxes, scores: _scores } = this.forwardInput(netInput); const boxes = _boxes[0]; const scores = _scores[0]; for (let i = 1; i < _boxes.length; i++) { @@ -60663,7 +60696,7 @@ var SsdMobilenetv1 = class extends NeuralNetwork { Math.max(0, boxesData[idx][1]), Math.min(1, boxesData[idx][3]) 
].map((val) => val * padX); - return new FaceDetection(scoresData[idx], new Rect(left, top, right - left, bottom - top), {height: netInput.getInputHeight(0), width: netInput.getInputWidth(0)}); + return new FaceDetection(scoresData[idx], new Rect(left, top, right - left, bottom - top), { height: netInput.getInputHeight(0), width: netInput.getInputWidth(0) }); }); boxes.dispose(); scores.dispose(); @@ -60771,13 +60804,13 @@ function extractorsFactory7(extractWeights, paramMappings) { function extractBatchNormParams(size, mappedPrefix) { const sub4 = tensor1d(extractWeights(size)); const truediv = tensor1d(extractWeights(size)); - paramMappings.push({paramPath: `${mappedPrefix}/sub`}, {paramPath: `${mappedPrefix}/truediv`}); - return {sub: sub4, truediv}; + paramMappings.push({ paramPath: `${mappedPrefix}/sub` }, { paramPath: `${mappedPrefix}/truediv` }); + return { sub: sub4, truediv }; } function extractConvWithBatchNormParams(channelsIn, channelsOut, mappedPrefix) { const conv3 = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`); const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`); - return {conv: conv3, bn}; + return { conv: conv3, bn }; } const extractSeparableConvParams = extractSeparableConvParamsFactory(extractWeights, paramMappings); return { @@ -60846,7 +60879,7 @@ function extractParams7(weights, config, boxEncodingSize, filterSizes) { if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } - return {params, paramMappings}; + return { params, paramMappings }; } // src/tinyYolov2/extractParamsFromWeightMap.ts @@ -60855,17 +60888,17 @@ function extractorsFactory8(weightMap, paramMappings) { function extractBatchNormParams(prefix) { const sub4 = extractWeightEntry(`${prefix}/sub`, 1); const truediv = extractWeightEntry(`${prefix}/truediv`, 1); - return {sub: sub4, truediv}; + return { sub: sub4, truediv }; } function extractConvParams(prefix) { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); - return {filters, bias}; + return { filters, bias }; } function extractConvWithBatchNormParams(prefix) { const conv3 = extractConvParams(`${prefix}/conv`); const bn = extractBatchNormParams(`${prefix}/bn`); - return {conv: conv3, bn}; + return { conv: conv3, bn }; } const extractSeparableConvParams = loadSeparableConvParamsFactory(extractWeightEntry); return { @@ -60909,12 +60942,12 @@ function extractParamsFromWeightMap7(weightMap, config) { }; } disposeUnusedWeightTensors(weightMap, paramMappings); - return {params, paramMappings}; + return { params, paramMappings }; } // src/tinyYolov2/TinyYolov2Options.ts var TinyYolov2Options = class { - constructor({inputSize, scoreThreshold} = {}) { + constructor({ inputSize, scoreThreshold } = {}) { this._name = "TinyYolov2Options"; this._inputSize = inputSize || 416; this._scoreThreshold = scoreThreshold || 0.5; @@ -60984,7 +61017,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return convLayer(out, params.conv8, "valid", false); } forwardInput(input2, inputSize) { - const {params} = this; + const { params } = this; if (!params) { throw new Error("TinyYolov2 - load model before inference"); } @@ -60999,7 +61032,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return this.forwardInput(await toNetInput(input2), inputSize); } async detect(input2, forwardParams = {}) { - const {inputSize, scoreThreshold} = new TinyYolov2Options(forwardParams); + const { inputSize, 
scoreThreshold } = new TinyYolov2Options(forwardParams); const netInput = await toNetInput(input2); const out = await this.forwardInput(netInput, inputSize); const out0 = tidy(() => unstack(out)[0].expandDims()); @@ -61033,7 +61066,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return extractParams7(weights, this.config, this.boxEncodingSize, filterSizes); } async extractBoxes(outputTensor, inputBlobDimensions, scoreThreshold) { - const {width, height} = inputBlobDimensions; + const { width, height } = inputBlobDimensions; const inputSize = Math.max(width, height); const correctionFactorX = inputSize / width; const correctionFactorY = inputSize / height; @@ -61060,8 +61093,8 @@ var _TinyYolov2Base = class extends NeuralNetwork { const heightLocal = Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y / numCells * correctionFactorY; const x = ctX - widthLocal / 2; const y = ctY - heightLocal / 2; - const pos = {row, col, anchor}; - const {classScore, label} = this.withClassScores ? await this.extractPredictedClass(classScoresTensor, pos) : {classScore: 1, label: 0}; + const pos = { row, col, anchor }; + const { classScore, label } = this.withClassScores ? await this.extractPredictedClass(classScoresTensor, pos) : { classScore: 1, label: 0 }; results.push({ box: new BoundingBox(x, y, x + widthLocal, y + heightLocal), score, @@ -61079,7 +61112,7 @@ var _TinyYolov2Base = class extends NeuralNetwork { return results; } async extractPredictedClass(classesTensor, pos) { - const {row, col, anchor} = pos; + const { row, col, anchor } = pos; const classesData = await classesTensor.array(); return Array(this.config.classes.length).fill(0).map((_, i) => classesData[row][col][anchor][i]).map((classScore, label) => ({ classScore, @@ -61115,7 +61148,7 @@ var TinyYolov2 = class extends TinyYolov2Base { } async locateFaces(input2, forwardParams) { const objectDetections = await this.detect(input2, forwardParams); - return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); + return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight })); } getDefaultModelName() { return this.withSeparableConvs ? DEFAULT_MODEL_NAME_SEPARABLE_CONV : DEFAULT_MODEL_NAME2; @@ -61151,7 +61184,7 @@ var ComposableTask = class { }; // src/globalApi/extractFacesAndComputeResults.ts -async function extractAllFacesAndComputeResults(parentResults, input2, computeResults, extractedFaces, getRectForAlignment = ({alignedRect}) => alignedRect) { +async function extractAllFacesAndComputeResults(parentResults, input2, computeResults, extractedFaces, getRectForAlignment = ({ alignedRect }) => alignedRect) { const faceBoxes = parentResults.map((parentResult) => isWithFaceLandmarks(parentResult) ? getRectForAlignment(parentResult) : parentResult.detection); const faces = extractedFaces || (input2 instanceof Tensor ? 
await extractFaceTensors(input2, faceBoxes) : await extractFaces(input2, faceBoxes)); const results = await computeResults(faces); @@ -61192,7 +61225,7 @@ var TinyFaceDetector = class extends TinyYolov2Base { } async locateFaces(input2, forwardParams) { const objectDetections = await this.detect(input2, forwardParams); - return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); + return objectDetections.map((det) => new FaceDetection(det.score, det.relativeBox, { width: det.imageWidth, height: det.imageHeight })); } getDefaultModelName() { return "tiny_face_detector_model"; @@ -61296,7 +61329,7 @@ var PredictAllAgeAndGenderTask = class extends PredictAgeAndGenderTaskBase { const parentResults = await this.parentTask; const ageAndGenderByFace = await extractAllFacesAndComputeResults(parentResults, this.input, async (faces) => Promise.all(faces.map((face) => nets.ageGenderNet.predictAgeAndGender(face))), this.extractedFaces); return parentResults.map((parentResult, i) => { - const {age, gender, genderProbability} = ageAndGenderByFace[i]; + const { age, gender, genderProbability } = ageAndGenderByFace[i]; return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age); }); } @@ -61309,7 +61342,7 @@ var PredictSingleAgeAndGenderTask = class extends PredictAgeAndGenderTaskBase { const parentResult = await this.parentTask; if (!parentResult) return void 0; - const {age, gender, genderProbability} = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.ageGenderNet.predictAgeAndGender(face), this.extractedFaces); + const { age, gender, genderProbability } = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.ageGenderNet.predictAgeAndGender(face), this.extractedFaces); return extendWithAge(extendWithGender(parentResult, gender, genderProbability), age); } withFaceExpressions() { @@ -61344,7 +61377,7 @@ var ComputeFaceDescriptorsTaskBase = class extends ComposableTask { var ComputeAllFaceDescriptorsTask = class extends ComputeFaceDescriptorsTaskBase { async run() { const parentResults = await this.parentTask; - const descriptors = await extractAllFacesAndComputeResults(parentResults, this.input, (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face))), null, (parentResult) => parentResult.landmarks.align(null, {useDlibAlignment: true})); + const descriptors = await extractAllFacesAndComputeResults(parentResults, this.input, (faces) => Promise.all(faces.map((face) => nets.faceRecognitionNet.computeFaceDescriptor(face))), null, (parentResult) => parentResult.landmarks.align(null, { useDlibAlignment: true })); return descriptors.map((descriptor, i) => extendWithFaceDescriptor(parentResults[i], descriptor)); } withFaceExpressions() { @@ -61360,7 +61393,7 @@ var ComputeSingleFaceDescriptorTask = class extends ComputeFaceDescriptorsTaskBa if (!parentResult) { return void 0; } - const descriptor = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.faceRecognitionNet.computeFaceDescriptor(face), null, (parentResult2) => parentResult2.landmarks.align(null, {useDlibAlignment: true})); + const descriptor = await extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets.faceRecognitionNet.computeFaceDescriptor(face), null, (parentResult2) => parentResult2.landmarks.align(null, { useDlibAlignment: true })); return extendWithFaceDescriptor(parentResult, descriptor); } 
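// A minimal end-to-end sketch (illustrative only, not part of the bundled
// source): the single-face descriptor task above can be combined with the
// fetchVideo() helper added in this release. The '/models' directory and the
// video URL are assumptions for the example.
//
//   await faceapi.nets.ssdMobilenetv1.loadFromUri('/models');
//   await faceapi.nets.faceLandmark68Net.loadFromUri('/models');
//   await faceapi.nets.faceRecognitionNet.loadFromUri('/models');
//   const video = await faceapi.fetchVideo('/media/sample.mp4');
//   const result = await faceapi.detectSingleFace(video).withFaceLandmarks().withFaceDescriptor();
//   if (result) console.log(result.descriptor); // 128-element Float32Array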
withFaceExpressions() { @@ -61408,7 +61441,7 @@ var DetectSingleFaceLandmarksTask = class extends DetectFaceLandmarksTaskBase { if (!parentResult) { return void 0; } - const {detection} = parentResult; + const { detection } = parentResult; const faces = this.input instanceof Tensor ? await extractFaceTensors(this.input, [detection]) : await extractFaces(this.input, [detection]); const landmarks = await this.landmarkNet.detectLandmarks(faces[0]); faces.forEach((f) => f instanceof Tensor && f.dispose()); @@ -61435,7 +61468,7 @@ var DetectFacesTaskBase = class extends ComposableTask { }; var DetectAllFacesTask = class extends DetectFacesTaskBase { async run() { - const {input: input2, options} = this; + const { input: input2, options } = this; let result; if (options instanceof TinyFaceDetectorOptions) result = nets.tinyFaceDetector.locateFaces(input2, options); @@ -61499,7 +61532,7 @@ function detectAllFaces(input2, options = new SsdMobilenetv1Options()) { // src/globalApi/allFaces.ts async function allFacesSsdMobilenetv1(input2, minConfidence) { - return detectAllFaces(input2, new SsdMobilenetv1Options(minConfidence ? {minConfidence} : {})).withFaceLandmarks().withFaceDescriptors(); + return detectAllFaces(input2, new SsdMobilenetv1Options(minConfidence ? { minConfidence } : {})).withFaceLandmarks().withFaceDescriptors(); } async function allFacesTinyYolov2(input2, forwardParams = {}) { return detectAllFaces(input2, new TinyYolov2Options(forwardParams)).withFaceLandmarks().withFaceDescriptors(); @@ -61548,7 +61581,7 @@ var FaceMatcher = class { return descriptors.map((d) => euclideanDistance(d, queryDescriptor)).reduce((d1, d2) => d1 + d2, 0) / (descriptors.length || 1); } matchDescriptor(queryDescriptor) { - return this.labeledDescriptors.map(({descriptors, label}) => new FaceMatch(label, this.computeMeanDistance(queryDescriptor, descriptors))).reduce((best, curr) => best.distance < curr.distance ? best : curr); + return this.labeledDescriptors.map(({ descriptors, label }) => new FaceMatch(label, this.computeMeanDistance(queryDescriptor, descriptors))).reduce((best, curr) => best.distance < curr.distance ? 
best : curr); } findBestMatch(queryDescriptor) { const bestMatch = this.matchDescriptor(queryDescriptor); @@ -61575,12 +61608,12 @@ function createTinyFaceDetector(weights) { // src/resizeResults.ts function resizeResults(results, dimensions) { - const {width, height} = new Dimensions(dimensions.width, dimensions.height); + const { width, height } = new Dimensions(dimensions.width, dimensions.height); if (width <= 0 || height <= 0) { - throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width, height})}`); + throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({ width, height })}`); } if (Array.isArray(results)) { - return results.map((obj) => resizeResults(obj, {width, height})); + return results.map((obj) => resizeResults(obj, { width, height })); } if (isWithFaceLandmarks(results)) { const resizedDetection = results.detection.forSize(width, height); @@ -61599,7 +61632,7 @@ function resizeResults(results, dimensions) { // src/index.ts var node = typeof process !== "undefined"; var browser = typeof navigator !== "undefined" && typeof navigator.userAgent !== "undefined"; -var version11 = {faceapi: version10, node, browser}; +var version11 = { faceapi: version10, node, browser }; export { AgeGenderNet, BoundingBox, @@ -61677,6 +61710,7 @@ export { fetchJson, fetchNetWeights, fetchOrThrow, + fetchVideo, getContext2dOrThrow, getMediaDimensions, imageTensorToCanvas, diff --git a/dist/face-api.esm.js.map b/dist/face-api.esm.js.map index 1ccf2a7..c45a506 100644 --- a/dist/face-api.esm.js.map +++ b/dist/face-api.esm.js.map @@ -1,7 +1,7 @@ { "version": 3, - "sources": ["../node_modules/.pnpm/node-fetch@2.6.1/node_modules/node-fetch/browser.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/alea.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xor128.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xorwow.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xorshift7.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/xor4096.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/lib/tychei.js", "(disabled):crypto", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/seedrandom.js", "../node_modules/.pnpm/seedrandom@2.4.3/node_modules/seedrandom/index.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/alea.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xor128.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xorwow.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xorshift7.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/xor4096.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/lib/tychei.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/seedrandom.js", "../node_modules/.pnpm/seedrandom@3.0.5/node_modules/seedrandom/index.js", "../node_modules/.pnpm/string_decoder@1.1.1/node_modules/string_decoder/lib/string_decoder.js", "(disabled):path", "(disabled):worker_threads", "(disabled):perf_hooks", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm-threaded-simd.js", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm.js", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/backend.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/util_base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/environment.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/global_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/kernel_names.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/kernel_registry.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/profiler.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/tape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/tensor_format.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/tensor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/tensor_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/types.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/engine.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/device_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/flags.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/tensor_util_env.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/operation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/complex.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor_ops_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/types.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/io_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/router_registry.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/indexed_db.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/local_storage.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/model_management.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/platforms/platform_browser.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/platforms/platform_node.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/buffer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/clone.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/print.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/base_side_effects.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/io.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/browser_files.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/progress.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/weights_loader.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/http.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/io/passthrough.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/math.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/mat_mul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/one_hot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/confusion_matrix.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/browser.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/gather_nd_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/scatter_nd_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/serialization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/test_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/globals.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/floorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/div.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/mul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/acos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/acosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/add_n.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/all.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/arg_max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/arg_min.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/asin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/asinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/atan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/atan2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/atanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/avg_pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/avg_pool_3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/basic_lstm_cell.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batch_to_space_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batchnorm_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batchnorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batchnorm2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batchnorm3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/batchnorm4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/bincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/broadcast_to.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/clip_by_value.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat_1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat_2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat_3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat_4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv2d_backprop_input.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv2d_transpose.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv3d_backprop_input.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv3d_transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/dense_bincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/depth_to_space.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/depthwise_conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/diag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/dilation2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/broadcast_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/where.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/zeros_like.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/div_no_nan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/dot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/einsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/erf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/expand_dims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/expm1.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/eye.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fill.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/gather.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/greater_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/imag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/is_finite.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/is_inf.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/is_nan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/leaky_relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/less_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/linspace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/local_response_normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/log1p.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/softplus.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/log_sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/log_softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/axis_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/log_sum_exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/logical_and.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/logical_not.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/logical_or.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/logical_xor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max_pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max_pool_3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max_pool_with_argmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/zeros.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/ones.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/meshgrid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/minimum.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/mirror_pad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/mod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/moments.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/multi_rnn_cell.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/multinomial.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/not_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/ones_like.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/outer_product.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pad1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pad2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pad3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pad4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/space_to_batch_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/rand.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/rand_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/random_gamma.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/random_normal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/random_uniform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/real.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reciprocal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reverse_1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reverse_2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reverse_3d.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reverse_4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/scalar.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/selu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/separable_conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/setdiff1d_async.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sign.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/slice4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/spectral/fft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/spectral/ifft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/spectral/irfft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/split.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/spectral/rfft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/squared_difference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/squeeze.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/stack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/strided_slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor5d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/tensor6d.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/topk.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/truncated_normal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/unique.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/unsorted_segment_sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/unstack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/variable.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/where_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/where_async.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/boolean_mask.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/norm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/moving_average.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/scatter_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sparse_to_dense_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sparse_to_dense.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/gather_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/dropout_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/dropout.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/signal_ops_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/in_top_k.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fused_ops.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv2d_backprop_filter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fused_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fused/conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/depthwise_conv2d_native_backprop_filter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/depthwise_conv2d_native_backprop_input.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fused/depthwise_conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/fused/mat_mul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/signal/hamming_window.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/signal/hann_window.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/signal/frame.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/signal/stft.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/crop_and_resize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/flip_left_right.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/rotate_with_offset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/nonmax_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/non_max_suppression_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/non_max_suppression_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression_async.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression_with_score.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression_with_score_async.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression_padded.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/non_max_suppression_padded_async.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/resize_bilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/resize_nearest_neighbor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/threshold.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/image/transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/linalg/band_part.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/linalg/gram_schmidt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/linalg/qr.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/loss_ops_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/compute_weighted_loss.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/absolute_difference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/cosine_distance.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/hinge_loss.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/huber_loss.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/log_loss.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/mean_squared_error.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/sigmoid_cross_entropy.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/losses/softmax_cross_entropy.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sparse/sparse_fill_empty_rows.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/sparse/sparse_reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/ops.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/adadelta_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/adagrad_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/adam_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/adamax_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/sgd_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/momentum_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/rmsprop_optimizer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/optimizers/optimizer_constructors.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/train.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/browser_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/backend_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/concat_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/reduce_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/rotate_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/array_ops_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/selu_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/erf_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/complex_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/einsum_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/split_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/segment_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/backends/kernel_impls.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/index.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Abs_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Acos_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Acosh_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Add_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/AddN_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ArgMax_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ArgMin_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Asin_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Asinh_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Atan2_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Atan_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Atanh_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/avg_pool_3d_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/AvgPool3D_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/avg_pool_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/AvgPool_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/BatchMatMul_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/BatchToSpaceND_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/BroadcastTo_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Cast_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Ceil_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ClipByValue_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ComplexAbs_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Concat_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Conv2D_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Conv2DBackpropInput_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/conv3d_backprop_filter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Conv3D_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Cos_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Cosh_grad.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Cumsum_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/DepthwiseConv2dNative_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Dilation2D_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Elu_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Erf_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Exp_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ExpandDims_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Expm1_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Floor_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/FloorDiv_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/FusedBatchNorm_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/GatherV2_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/GreaterEqual_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Identity_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/IsFinite_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/IsInf_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/IsNan_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/LeakyRelu_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Log1p_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Log_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/LogSoftmax_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/local_response_normalization_backprop.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/LRN_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/min_max_grad_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Max_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Maximum_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max_pool_3d_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/MaxPool3D_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/ops/max_pool_grad.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/MaxPool_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Mean_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Min_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Minimum_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/MirrorPad_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Mod_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Multiply_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Neg_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/OneHot_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/OnesLike_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Pack_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/PadV2_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Pow_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Prelu_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/RealDiv_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Reciprocal_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Relu6_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Relu_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Reshape_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ResizeBilinear_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ResizeNearestNeighbor_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Reverse_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Round_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Rsqrt_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Select_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Selu_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sigmoid_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sign_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sin_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sinh_grad.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Slice_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Softmax_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Softplus_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/SpaceToBatchND_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/SplitV_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sqrt_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Square_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/SquaredDifference_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Step_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sub_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Sum_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Tan_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Tanh_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Tile_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Transpose_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/Unpack_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/UnsortedSegmentSum_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/gradients/ZerosLike_grad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/register_all_gradients.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/acos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/acosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/all.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/arg_max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/arg_min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as_scalar.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as_type.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as3d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as4d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/as5d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/asin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/asinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/atan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/atan2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/atanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/avg_pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/batch_to_space_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/batchnorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/broadcast_to.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/clip_by_value.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/conv1d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/conv2d_transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/depth_to_space.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/depthwise_conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/dilation2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/div_no_nan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/div.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/dot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/erf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/expand_dims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/expm1.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/fft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/flatten.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/floorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/gather.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/greater_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/ifft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/irfft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/is_finite.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/is_inf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/is_nan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/leaky_relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/less_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/local_response_normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/log_sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/log_softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/log_sum_exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/log1p.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/logical_and.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/logical_not.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/logical_or.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/logical_xor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/mat_mul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/max_pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/minimum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/mirror_pad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/mod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/mul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/norm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/not_equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/one_hot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/ones_like.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/pad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/pool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/reciprocal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/reshape_as.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/resize_bilinear.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/resize_nearest_neighbor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/rfft.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/selu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/separable_conv2d.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sign.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/softplus.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/space_to_batch_nd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/split.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/squared_difference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/squeeze.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/stack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/strided_slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/tan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/to_bool.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/to_float.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/to_int.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/topk.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/unique.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/unsorted_segment_sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/unstack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/where.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/zeros_like.ts", "../node_modules/.pnpm/@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-core/src/public/chained_ops/register_all_chained_ops.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_constraints.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/backend/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/errors.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/generic_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/constraints.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_initializers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/keras_format/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/common.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/math_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/backend/tfjs_backend.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/keras_format/initializer_config.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/initializers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_layers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/backend/state.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/types_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/variable_utils.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/variables.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/topology.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/input_layer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/logs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/base_callbacks.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/serialization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/losses.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/metrics.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/optimizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/user_defined_metadata.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/layer_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/serialization_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/container.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/training_tensors.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/engine/training.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/models.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/activations.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/regularizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/advanced_activations.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/utils/conv_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/recurrent.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/convolutional_recurrent.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/core.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/embeddings.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/merge.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/noise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/padding.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/pooling.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/layers/wrappers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_metrics.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_models.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/exports_regularizers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/callbacks.ts", "../node_modules/.pnpm/@tensorflow+tfjs-layers@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-layers/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/data/compiled_api.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/custom_op/register.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/arithmetic.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/basic_math.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/control.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/convolution.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/creation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/dynamic.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/evaluation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/graph.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/hash_table.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/image.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/logical.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/matrices.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/normalization.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/reduction.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/slice_join.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/spectral.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/op_list/transformation.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/operation_mapper.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/custom_op/node_value_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/arithmetic_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/basic_math_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_array.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/tensor_list.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/control_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/convolution_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/creation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/dynamic_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/evaluation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/graph_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/hash_table.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/hash_table_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/image_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/logical_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/matrices_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/normalization_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/reduction_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/slice_join_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/sparse_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/spectral_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/executors/transformation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/operations/operation_executor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/execution_context.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/model_analysis.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/graph_executor.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/resource_manager.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/executor/graph_model.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-converter@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-converter/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/lazy_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/util/deep_map.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/util/deep_clone.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/util/ring_buffer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/util/growing_ring_buffer.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/datasets/text_line_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/datasets/csv_dataset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/microphone_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/webcam_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/datasource.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/string_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/byte_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/file_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/iterators/url_chunk_iterator.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/util/source_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/sources/file_data_source.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/sources/url_data_source.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/readers.ts", "../node_modules/.pnpm/@tensorflow+tfjs-data@3.6.0_5706159424f1774d88b030214a8dd243/node_modules/@tensorflow/tfjs-data/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/cpu_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/backend_cpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/binary_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Complex.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/zeros_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Identity.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Real.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/binary_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Bincount_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/unary_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/unary_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Concat_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Expm1.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/GatherV2_impl.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LinSpace_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Max_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Minimum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Multiply.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/NotEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Transpose_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Range_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SparseFillEmptyRows_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SparseReshape_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/StridedSlice_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Tile_impl.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/TopK_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Unique_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/fused_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Acos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Acosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/AddN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/All.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ArgMax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ArgMin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Asin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Asinh.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Atan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Atan2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Atanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/pool_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/AvgPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/AvgPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/AvgPool3DGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/AvgPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/BatchNorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/BatchToSpaceND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Bincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Clip.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ComplexAbs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Imag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv2DBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv2DBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv3DBackpropFilterV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Conv3DBackpropInputV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Cos.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/DenseBincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/DepthToSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNative.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNativeBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/DepthwiseConv2dNativeBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Diag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Dilation2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Dilation2DBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Dilation2DBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Einsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/EluGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Erf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/RealDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/utils/fft_utils.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/FFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Fill.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/FlipLeftRight.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/FusedConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/GatherNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/GatherV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/IFFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/IsFinite.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/IsInf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/IsNaN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LessEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LinSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Log1p.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LogicalNot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LogicalOr.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LRN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/LRNGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPool3DGrad.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPoolWithArgmax_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MaxPoolWithArgmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Mod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Multinomial.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/NonMaxSuppressionV5.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/OneHot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/OnesLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Pack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/PadV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Reciprocal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ResizeBilinearGrad.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ResizeNearestNeighbor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ResizeNearestNeighborGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Scatter_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Select.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Selu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sign.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Softplus.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SpaceToBatchND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SparseFillEmptyRows.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SparseReshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SparseToDense.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/SplitV.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/StridedSlice.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Tan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/TopK.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Unique.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Unpack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/UnsortedSegmentSum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/register_all_kernels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/canvas_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/tex_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/webgl_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/flags_webgl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/glsl_version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/shader_compiler_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/decode_matrix_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/decode_matrix_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_float_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_float_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_matrix_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/encode_matrix_packed_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_context.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/shader_compiler.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gpgpu_math.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/packing_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pack_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reshape_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/texture_manager.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unaryop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unaryop_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/unpack_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/backend_webgl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Identity.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Complex.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/kernel_funcs_utils.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mulmat_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/binaryop_complex_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Multiply.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mean_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reduce_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/reduce.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transpose_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transpose_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transpose_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sum_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchMatMul_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Acos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Acosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/addn_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/addn_packed_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AddN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/All.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/argminmax_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/argminmax_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/arg_min_max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ArgMax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ArgMin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Asin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Asinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atan2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Atanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pool_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/avg_pool_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPool3DGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/AvgPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/batchnorm_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/batchnorm_packed_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchNorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/slice_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/slice_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/BatchToSpaceND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Bincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NotEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Real.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernel_utils/int.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/clip_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/clip_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ClipByValue.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/complex_abs_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ComplexAbs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/concat_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/concat_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Imag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Concat_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_gpu.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/im2col_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2D_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2DBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv2DBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3DBackpropFilterV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Conv3DBackpropInputV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cosh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/crop_and_resize_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/cumsum_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DenseBincount.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/depth_to_space_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthToSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_packed_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNative.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/conv_backprop_gpu_depthwise.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNativeBackpropFilter.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/DepthwiseConv2dNativeBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/diag_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Diag.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/dilation_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Dilation2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Einsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Elu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/EluGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Erf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Expm1.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/fft_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FFT_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/fill_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Fill.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/flip_left_right_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FlipLeftRight.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels_utils/from_pixels_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels_utils/from_pixels_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FromPixels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FusedConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gather_nd_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GatherNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/gather_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GatherV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IFFT.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsFinite.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsInf.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/IsNaN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LessEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LinSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Log1p.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalNot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LogicalOr.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LRN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/lrn_grad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/LRNGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Max_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool3D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/max_pool_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPool3DGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolWithArgmax_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MaxPoolWithArgmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mean_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Minimum.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mirror_pad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/mirror_pad_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Mod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/multinomial_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/RealDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Multinomial.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/NonMaxSuppressionV5.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/onehot_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/OneHot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/OnesLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Pack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pad_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/pad_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/PadV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Pow.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reciprocal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_bilinear_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeBilinearGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeNearestNeighbor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/resize_nearest_neighbor_backprop_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ResizeNearestNeighborGrad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reverse_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/reverse_packed_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/rotate_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Round.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/scatter_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/select_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Select.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Selu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sign.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sinh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Softplus.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SpaceToBatchND.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseFillEmptyRows.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseReshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SparseToDense.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SplitV.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Step.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/strided_slice_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/StridedSlice.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/tile_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/TopK.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/transform_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Unique.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/Unpack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/segment_gpu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/kernels/UnsortedSegmentSum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/register_all_kernels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-webgl@3.6.0_@tensorflow+tfjs-core@3.6.0/node_modules/@tensorflow/tfjs-backend-webgl/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs@3.6.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs@3.6.0_seedrandom@3.0.5/node_modules/@tensorflow/tfjs/src/index.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/types.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/_FusedMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/unary_kernel.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Abs.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/binary_kernel.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Add.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/AddN.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Identity.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Transpose.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/kernel_utils.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/All.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Any.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ArgMax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/AvgPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Reshape.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/BatchMatMul.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cast.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Ceil.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ClipByValue.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Concat_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Range_impl.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-cpu@3.6.0/node_modules/@tensorflow/tfjs-backend-cpu/src/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernel_utils/shared.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Concat.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Conv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Conv2DBackpropInput.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cos.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/CropAndResize.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Cumsum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/DepthToSpace.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/DepthwiseConv2dNative.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Equal.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Exp.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ExpandDims.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Fill.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FlipLeftRight.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Floor.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FloorDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedBatchNorm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/FusedDepthwiseConv2D.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GatherNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GatherV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Greater.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/GreaterEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LeakyRelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Less.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LessEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Log.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/LogicalAnd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Max.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Maximum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/MaxPool.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Mean.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Min.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Minimum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/MirrorPad.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Multiply.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Neg.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppression_util.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV3.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV4.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NonMaxSuppressionV5.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/NotEqual.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/OneHot.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/OnesLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Pack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/PadV2.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Pow.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Prelu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Prod.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Range.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/RealDiv.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Relu.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Relu6.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ResizeBilinear.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Reverse.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/RotateWithOffset.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Round.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Rsqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ScatterNd.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Select.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sigmoid.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sin.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Slice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Softmax.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/SplitV.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sqrt.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Square.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/SquaredDifference.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Step.ts", 
"../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/StridedSlice.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sub.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Sum.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tan.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tanh.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Tile.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/TopK.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Transform.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/Unpack.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/kernels/ZerosLike.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/register_all_kernels.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/flags_wasm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/backend_wasm.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/wasm-out/tfjs-backend-wasm-threaded-simd.worker.js", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/version.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/base.ts", "../node_modules/.pnpm/@tensorflow+tfjs-backend-wasm@3.6.0/node_modules/@tensorflow/tfjs-backend-wasm/src/index.ts", "../src/draw/index.ts", "../src/draw/drawContour.ts", "../src/utils/index.ts", "../src/classes/Dimensions.ts", "../src/classes/Point.ts", "../src/classes/Box.ts", "../src/classes/BoundingBox.ts", "../src/classes/ObjectDetection.ts", "../src/classes/FaceDetection.ts", "../src/ops/iou.ts", "../src/ops/minBbox.ts", "../src/ops/nonMaxSuppression.ts", "../src/ops/normalize.ts", "../src/ops/padToSquare.ts", "../src/ops/shuffleArray.ts", "../src/ops/index.ts", "../src/classes/Rect.ts", "../src/classes/FaceLandmarks.ts", "../src/classes/FaceLandmarks5.ts", "../src/classes/FaceLandmarks68.ts", "../src/classes/FaceMatch.ts", "../src/classes/LabeledBox.ts", "../src/classes/LabeledFaceDescriptors.ts", "../src/classes/PredictedBox.ts", "../src/factories/WithFaceDetection.ts", "../src/env/createBrowserEnv.ts", "../src/env/createFileSystem.ts", "../src/env/createNodejsEnv.ts", "../src/env/isBrowser.ts", "../src/env/isNodejs.ts", "../src/env/index.ts", "../src/dom/resolveInput.ts", "../src/dom/getContext2dOrThrow.ts", "../src/draw/DrawTextField.ts", "../src/draw/DrawBox.ts", "../src/draw/drawDetections.ts", "../src/dom/isMediaLoaded.ts", "../src/dom/awaitMediaLoaded.ts", "../src/dom/bufferToImage.ts", "../src/dom/getMediaDimensions.ts", "../src/dom/createCanvas.ts", "../src/dom/imageTensorToCanvas.ts", "../src/dom/isMediaElement.ts", "../src/dom/imageToSquare.ts", "../src/dom/NetInput.ts", 
"../src/dom/toNetInput.ts", "../src/dom/extractFaces.ts", "../src/dom/extractFaceTensors.ts", "../src/dom/fetchOrThrow.ts", "../src/dom/fetchImage.ts", "../src/dom/fetchJson.ts", "../src/dom/fetchNetWeights.ts", "../src/common/getModelUris.ts", "../src/dom/loadWeightMap.ts", "../src/dom/matchDimensions.ts", "../src/NeuralNetwork.ts", "../src/common/depthwiseSeparableConv.ts", "../src/faceFeatureExtractor/denseBlock.ts", "../src/common/convLayer.ts", "../src/common/disposeUnusedWeightTensors.ts", "../src/common/extractConvParamsFactory.ts", "../src/common/extractFCParamsFactory.ts", "../src/common/types.ts", "../src/common/extractSeparableConvParamsFactory.ts", "../src/common/extractWeightEntryFactory.ts", "../src/common/extractWeightsFactory.ts", "../src/faceFeatureExtractor/extractorsFactory.ts", "../src/faceFeatureExtractor/extractParams.ts", "../src/common/loadConvParamsFactory.ts", "../src/faceFeatureExtractor/loadParamsFactory.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMap.ts", "../src/faceFeatureExtractor/FaceFeatureExtractor.ts", "../src/common/fullyConnectedLayer.ts", "../src/faceProcessor/extractParams.ts", "../src/faceProcessor/extractParamsFromWeightMap.ts", "../src/faceProcessor/util.ts", "../src/faceProcessor/FaceProcessor.ts", "../src/faceExpressionNet/FaceExpressions.ts", "../src/faceExpressionNet/FaceExpressionNet.ts", "../src/factories/WithFaceExpressions.ts", "../src/draw/drawFaceExpressions.ts", "../src/factories/WithFaceLandmarks.ts", "../src/draw/DrawFaceLandmarks.ts", "../src/xception/extractParams.ts", "../src/xception/extractParamsFromWeightMap.ts", "../src/xception/TinyXception.ts", "../src/ageGenderNet/extractParams.ts", "../src/ageGenderNet/extractParamsFromWeightMap.ts", "../src/ageGenderNet/types.ts", "../src/ageGenderNet/AgeGenderNet.ts", "../src/faceLandmarkNet/FaceLandmark68NetBase.ts", "../src/faceLandmarkNet/FaceLandmark68Net.ts", "../src/faceFeatureExtractor/extractParamsFromWeightMapTiny.ts", "../src/faceFeatureExtractor/extractParamsTiny.ts", "../src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "../src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "../src/faceLandmarkNet/index.ts", "../src/faceRecognitionNet/scaleLayer.ts", "../src/faceRecognitionNet/convLayer.ts", "../src/faceRecognitionNet/extractParams.ts", "../src/faceRecognitionNet/extractParamsFromWeightMap.ts", "../src/faceRecognitionNet/residualLayer.ts", "../src/faceRecognitionNet/FaceRecognitionNet.ts", "../src/faceRecognitionNet/index.ts", "../src/factories/WithFaceDescriptor.ts", "../src/factories/WithAge.ts", "../src/factories/WithGender.ts", "../src/ssdMobilenetv1/extractParams.ts", "../src/ssdMobilenetv1/extractParamsFromWeightMap.ts", "../src/ssdMobilenetv1/pointwiseConvLayer.ts", "../src/ssdMobilenetv1/mobileNetV1.ts", "../src/ssdMobilenetv1/nonMaxSuppression.ts", "../src/ssdMobilenetv1/outputLayer.ts", "../src/ssdMobilenetv1/boxPredictionLayer.ts", "../src/ssdMobilenetv1/predictionLayer.ts", "../src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "../src/ssdMobilenetv1/SsdMobilenetv1.ts", "../src/ssdMobilenetv1/index.ts", "../src/tinyYolov2/const.ts", "../src/tinyYolov2/config.ts", "../src/tinyYolov2/leaky.ts", "../src/tinyYolov2/convWithBatchNorm.ts", "../src/tinyYolov2/depthwiseSeparableConv.ts", "../src/tinyYolov2/extractParams.ts", "../src/tinyYolov2/extractParamsFromWeightMap.ts", "../src/tinyYolov2/TinyYolov2Options.ts", "../src/tinyYolov2/TinyYolov2Base.ts", "../src/tinyYolov2/TinyYolov2.ts", "../src/tinyYolov2/index.ts", 
"../src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "../src/globalApi/ComposableTask.ts", "../src/globalApi/extractFacesAndComputeResults.ts", "../src/tinyFaceDetector/const.ts", "../src/tinyFaceDetector/TinyFaceDetector.ts", "../src/globalApi/nets.ts", "../src/globalApi/PredictFaceExpressionsTask.ts", "../src/globalApi/PredictAgeAndGenderTask.ts", "../src/globalApi/ComputeFaceDescriptorsTasks.ts", "../src/globalApi/DetectFaceLandmarksTasks.ts", "../src/globalApi/DetectFacesTasks.ts", "../src/globalApi/detectFaces.ts", "../src/globalApi/allFaces.ts", "../src/euclideanDistance.ts", "../src/globalApi/FaceMatcher.ts", "../src/tinyFaceDetector/index.ts", "../src/resizeResults.ts", "../src/index.ts"], - "sourcesContent": ["", "// A port of an algorithm by Johannes Baag\u00F8e , 2010\n// http://baagoe.com/en/RandomMusings/javascript/\n// https://github.com/nquinlan/better-random-numbers-for-javascript-mirror\n// Original work is under MIT license -\n\n// Copyright (C) 2010 by Johannes Baag\u00F8e \n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n// \n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n// \n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\n\n\n(function(global, module, define) {\n\nfunction Alea(seed) {\n var me = this, mash = Mash();\n\n me.next = function() {\n var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32\n me.s0 = me.s1;\n me.s1 = me.s2;\n return me.s2 = t - (me.c = t | 0);\n };\n\n // Apply the seeding algorithm from Baagoe.\n me.c = 1;\n me.s0 = mash(' ');\n me.s1 = mash(' ');\n me.s2 = mash(' ');\n me.s0 -= mash(seed);\n if (me.s0 < 0) { me.s0 += 1; }\n me.s1 -= mash(seed);\n if (me.s1 < 0) { me.s1 += 1; }\n me.s2 -= mash(seed);\n if (me.s2 < 0) { me.s2 += 1; }\n mash = null;\n}\n\nfunction copy(f, t) {\n t.c = f.c;\n t.s0 = f.s0;\n t.s1 = f.s1;\n t.s2 = f.s2;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new Alea(seed),\n state = opts && opts.state,\n prng = xg.next;\n prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }\n prng.double = function() {\n return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53\n };\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nfunction Mash() {\n var n = 0xefc8249d;\n\n var mash = function(data) {\n data = data.toString();\n for (var i = 0; i < data.length; i++) {\n n += data.charCodeAt(i);\n var h = 0.02519603282416938 * n;\n n = h >>> 0;\n h -= n;\n h *= n;\n n = h >>> 0;\n h -= n;\n n += h * 0x100000000; // 2^32\n }\n return (n >>> 0) * 2.3283064365386963e-10; // 2^-32\n };\n\n return mash;\n}\n\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.alea = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xor128\" prng algorithm by\n// George Marsaglia. 
See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n\n // Set up generator function.\n me.next = function() {\n var t = me.x ^ (me.x << 11);\n me.x = me.y;\n me.y = me.z;\n me.z = me.w;\n return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);\n };\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor128 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorwow\" prng algorithm by\n// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var t = (me.x ^ (me.x >>> 2));\n me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v;\n return (me.d = (me.d + 362437 | 0)) +\n (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;\n };\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n me.v = 0;\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n if (k == strseed.length) {\n me.d = me.x << 10 ^ me.x >>> 4;\n }\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n t.v = f.v;\n t.d = f.d;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorwow = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorshift7\" 
algorithm by\n// Fran\u00E7ois Panneton and Pierre L'ecuyer:\n// \"On the Xorgshift Random Number Generators\"\n// http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n // Update xor generator.\n var X = me.x, i = me.i, t, v, w;\n t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24);\n t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10);\n t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3);\n t = X[(i + 4) & 7]; v ^= t ^ (t << 7);\n t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9);\n X[i] = v;\n me.i = (i + 1) & 7;\n return v;\n };\n\n function init(me, seed) {\n var j, w, X = [];\n\n if (seed === (seed | 0)) {\n // Seed state array using a 32-bit integer.\n w = X[0] = seed;\n } else {\n // Seed state using a string.\n seed = '' + seed;\n for (j = 0; j < seed.length; ++j) {\n X[j & 7] = (X[j & 7] << 15) ^\n (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);\n }\n }\n // Enforce an array length of 8, not all zeroes.\n while (X.length < 8) X.push(0);\n for (j = 0; j < 8 && X[j] === 0; ++j);\n if (j == 8) w = X[7] = -1; else w = X[j];\n\n me.x = X;\n me.i = 0;\n\n // Discard an initial 256 values.\n for (j = 256; j > 0; --j) {\n me.next();\n }\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.x = f.x.slice();\n t.i = f.i;\n return t;\n}\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.x) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorshift7 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n", "// A Javascript implementaion of Richard Brent's Xorgens xor4096 algorithm.\n//\n// This fast non-cryptographic random number generator is designed for\n// use in Monte-Carlo algorithms. It combines a long-period xorshift\n// generator with a Weyl generator, and it passes all common batteries\n// of stasticial tests for randomness while consuming only a few nanoseconds\n// for each prng generated. For background on the generator, see Brent's\n// paper: \"Some long-period random number generators using shifts and xors.\"\n// http://arxiv.org/pdf/1004.3115v1.pdf\n//\n// Usage:\n//\n// var xor4096 = require('xor4096');\n// random = xor4096(1); // Seed with int32 or string.\n// assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.\n// assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.\n//\n// For nonzero numeric keys, this impelementation provides a sequence\n// identical to that by Brent's xorgens 3 implementaion in C. 
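// A minimal usage sketch for the xor4096 generator described above, assuming
// the standalone seedrandom npm package (whose index, shown further below,
// re-exports it as .xor4096); seeds may be an int32 or a string.
var sr = require('seedrandom');
var rng = sr.xor4096(1);        // deterministic sequence for a numeric seed
rng();                          // double in [0, 1) from one 32-bit output
rng.double();                   // double in [0, 1) with a full 53-bit mantissa
rng.int32();                    // signed 32-bit integer
// State save/restore is opt-in via { state: true }:
var saved = sr.xor4096('seed', { state: true });
var resumed = sr.xor4096('', { state: saved.state() }); // continues the sequence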
This\n// implementation also provides for initalizing the generator with\n// string seeds, or for saving and restoring the state of the generator.\n//\n// On Chrome, this prng benchmarks about 2.1 times slower than\n// Javascript's built-in Math.random().\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n var w = me.w,\n X = me.X, i = me.i, t, v;\n // Update Weyl generator.\n me.w = w = (w + 0x61c88647) | 0;\n // Update xor generator.\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n // Update Xor generator array state.\n v = X[i] = v ^ t;\n me.i = i;\n // Result is the combination.\n return (v + (w ^ (w >>> 16))) | 0;\n };\n\n function init(me, seed) {\n var t, v, i, j, w, X = [], limit = 128;\n if (seed === (seed | 0)) {\n // Numeric seeds initialize v, which is used to generates X.\n v = seed;\n seed = null;\n } else {\n // String seeds are mixed into v and X one character at a time.\n seed = seed + '\\0';\n v = 0;\n limit = Math.max(limit, seed.length);\n }\n // Initialize circular array and weyl value.\n for (i = 0, j = -32; j < limit; ++j) {\n // Put the unicode characters into the array, and shuffle them.\n if (seed) v ^= seed.charCodeAt((j + 32) % seed.length);\n // After 32 shuffles, take v as the starting w value.\n if (j === 0) w = v;\n v ^= v << 10;\n v ^= v >>> 15;\n v ^= v << 4;\n v ^= v >>> 13;\n if (j >= 0) {\n w = (w + 0x61c88647) | 0; // Weyl.\n t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.\n i = (0 == t) ? i + 1 : 0; // Count zeroes.\n }\n }\n // We have detected all zeroes; make the key nonzero.\n if (i >= 128) {\n X[(seed && seed.length || 0) & 127] = -1;\n }\n // Run the generator 512 times to further mix the state before using it.\n // Factoring this as a function slows the main generator, so it is just\n // unrolled here. 
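// The prng.double() helper that impl() defines just below (and that every
// generator in this bundle repeats) glues two 32-bit outputs into one
// 53-bit double; a standalone sketch, assuming `next` returns an int32:
function toDouble(next) {
  var top = next() >>> 11;                  // high 21 bits, in [0, 2^21)
  var bot = (next() >>> 0) / 0x100000000;   // low 32 bits, in [0, 1)
  return (top + bot) / (1 << 21);           // 21 + 32 = 53 mantissa bits, in [0, 1)
}
// The vendored version additionally loops until the result is nonzero, so
// prng.double() stays inside the open interval (0, 1).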
The weyl generator is not advanced while warming up.\n i = 127;\n for (j = 4 * 128; j > 0; --j) {\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n X[i] = v ^ t;\n }\n // Storing state as object members is faster than using closure variables.\n me.w = w;\n me.X = X;\n me.i = i;\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.i = f.i;\n t.w = f.w;\n t.X = f.X.slice();\n return t;\n};\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.X) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor4096 = impl;\n}\n\n})(\n this, // window object or global\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n", "// A Javascript implementaion of the \"Tyche-i\" prng algorithm by\n// Samuel Neves and Filipe Araujo.\n// See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var b = me.b, c = me.c, d = me.d, a = me.a;\n b = (b << 25) ^ (b >>> 7) ^ c;\n c = (c - d) | 0;\n d = (d << 24) ^ (d >>> 8) ^ a;\n a = (a - b) | 0;\n me.b = b = (b << 20) ^ (b >>> 12) ^ c;\n me.c = c = (c - d) | 0;\n me.d = (d << 16) ^ (c >>> 16) ^ a;\n return me.a = (a - b) | 0;\n };\n\n /* The following is non-inverted tyche, which has better internal\n * bit diffusion, but which is about 25% slower than tyche-i in JS.\n me.next = function() {\n var a = me.a, b = me.b, c = me.c, d = me.d;\n a = (me.a + me.b | 0) >>> 0;\n d = me.d ^ a; d = d << 16 ^ d >>> 16;\n c = me.c + d | 0;\n b = me.b ^ c; b = b << 12 ^ d >>> 20;\n me.a = a = a + b | 0;\n d = d ^ a; me.d = d = d << 8 ^ d >>> 24;\n me.c = c = c + d | 0;\n b = b ^ c;\n return me.b = (b << 7 ^ b >>> 25);\n }\n */\n\n me.a = 0;\n me.b = 0;\n me.c = 2654435769 | 0;\n me.d = 1367130551;\n\n if (seed === Math.floor(seed)) {\n // Integer seed.\n me.a = (seed / 0x100000000) | 0;\n me.b = seed | 0;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 20; k++) {\n me.b ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.a = f.a;\n t.b = f.b;\n t.c = f.c;\n t.d = f.d;\n return t;\n};\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n 
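// UMD-style export, repeated verbatim by each generator in this bundle:
// prefer CommonJS (module.exports), fall back to an AMD define(), and
// otherwise attach the generator to the global object by name.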
module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.tychei = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "", "/*\nCopyright 2014 David Bau.\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n*/\n\n(function (pool, math) {\n//\n// The following constants are related to IEEE 754 limits.\n//\nvar global = this,\n width = 256, // each RC4 output is 0 <= x < 256\n chunks = 6, // at least six RC4 outputs for each double\n digits = 52, // there are 52 significant digits in a double\n rngname = 'random', // rngname: name for Math.random and Math.seedrandom\n startdenom = math.pow(width, chunks),\n significance = math.pow(2, digits),\n overflow = significance * 2,\n mask = width - 1,\n nodecrypto; // node.js crypto module, initialized at the bottom.\n\n//\n// seedrandom()\n// This is the seedrandom function described above.\n//\nfunction seedrandom(seed, options, callback) {\n var key = [];\n options = (options == true) ? { entropy: true } : (options || {});\n\n // Flatten the seed string or build one from local entropy if needed.\n var shortseed = mixkey(flatten(\n options.entropy ? [seed, tostring(pool)] :\n (seed == null) ? 
autoseed() : seed, 3), key);\n\n // Use the seed to initialize an ARC4 generator.\n var arc4 = new ARC4(key);\n\n // This function returns a random double in [0, 1) that contains\n // randomness in every bit of the mantissa of the IEEE 754 value.\n var prng = function() {\n var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48\n d = startdenom, // and denominator d = 2 ^ 48.\n x = 0; // and no 'extra last byte'.\n while (n < significance) { // Fill up all significant digits by\n n = (n + x) * width; // shifting numerator and\n d *= width; // denominator and generating a\n x = arc4.g(1); // new least-significant-byte.\n }\n while (n >= overflow) { // To avoid rounding up, before adding\n n /= 2; // last byte, shift everything\n d /= 2; // right using integer math until\n x >>>= 1; // we have exactly the desired bits.\n }\n return (n + x) / d; // Form the number within [0, 1).\n };\n\n prng.int32 = function() { return arc4.g(4) | 0; }\n prng.quick = function() { return arc4.g(4) / 0x100000000; }\n prng.double = prng;\n\n // Mix the randomness into accumulated entropy.\n mixkey(tostring(arc4.S), pool);\n\n // Calling convention: what to return as a function of prng, seed, is_math.\n return (options.pass || callback ||\n function(prng, seed, is_math_call, state) {\n if (state) {\n // Load the arc4 state from the given state if it has an S array.\n if (state.S) { copy(state, arc4); }\n // Only provide the .state method if requested via options.state.\n prng.state = function() { return copy(arc4, {}); }\n }\n\n // If called as a method of Math (Math.seedrandom()), mutate\n // Math.random because that is how seedrandom.js has worked since v1.0.\n if (is_math_call) { math[rngname] = prng; return seed; }\n\n // Otherwise, it is a newer calling convention, so return the\n // prng directly.\n else return prng;\n })(\n prng,\n shortseed,\n 'global' in options ? options.global : (this == math),\n options.state);\n}\nmath['seed' + rngname] = seedrandom;\n\n//\n// ARC4\n//\n// An ARC4 implementation. The constructor takes a key in the form of\n// an array of at most (width) integers that should be 0 <= x < (width).\n//\n// The g(count) method returns a pseudorandom integer that concatenates\n// the next (count) outputs from ARC4. Its return value is a number x\n// that is in the range 0 <= x < (width ^ count).\n//\nfunction ARC4(key) {\n var t, keylen = key.length,\n me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];\n\n // The empty key [] is treated as [0].\n if (!keylen) { key = [keylen++]; }\n\n // Set up S using the standard key scheduling algorithm.\n while (i < width) {\n s[i] = i++;\n }\n for (i = 0; i < width; i++) {\n s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];\n s[j] = t;\n }\n\n // The \"g\" method returns the next (count) outputs as one number.\n (me.g = function(count) {\n // Using instance members instead of closure state nearly doubles speed.\n var t, r = 0,\n i = me.i, j = me.j, s = me.S;\n while (count--) {\n t = s[i = mask & (i + 1)];\n r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];\n }\n me.i = i; me.j = j;\n return r;\n // For robust unpredictability, the function call below automatically\n // discards an initial batch of values. 
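// Hedged usage sketch for the ARC4-based seedrandom() above: the new-style
// call returns a prng and leaves Math.random alone, while the classic
// browser form Math.seedrandom(seed) replaces Math.random in place.
var seedrandom = require('seedrandom');
var rng = seedrandom('hello.'); // string seed; output is fully deterministic
rng();                          // double in [0, 1), every mantissa bit random
rng.quick();                    // double in [0, 1), 32 bits of randomness
rng.int32();                    // signed 32-bit integer
var auto = seedrandom();        // no seed: autoseeded from crypto entropy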
This is called RC4-drop[256].\n // See http://google.com/search?q=rsa+fluhrer+response&btnI\n })(width);\n}\n\n//\n// copy()\n// Copies internal state of ARC4 to or from a plain object.\n//\nfunction copy(f, t) {\n t.i = f.i;\n t.j = f.j;\n t.S = f.S.slice();\n return t;\n};\n\n//\n// flatten()\n// Converts an object tree to nested arrays of strings.\n//\nfunction flatten(obj, depth) {\n var result = [], typ = (typeof obj), prop;\n if (depth && typ == 'object') {\n for (prop in obj) {\n try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}\n }\n }\n return (result.length ? result : typ == 'string' ? obj : obj + '\\0');\n}\n\n//\n// mixkey()\n// Mixes a string seed into a key that is an array of integers, and\n// returns a shortened string seed that is equivalent to the result key.\n//\nfunction mixkey(seed, key) {\n var stringseed = seed + '', smear, j = 0;\n while (j < stringseed.length) {\n key[mask & j] =\n mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));\n }\n return tostring(key);\n}\n\n//\n// autoseed()\n// Returns an object for autoseeding, using window.crypto and Node crypto\n// module if available.\n//\nfunction autoseed() {\n try {\n var out;\n if (nodecrypto && (out = nodecrypto.randomBytes)) {\n // The use of 'out' to remember randomBytes makes tight minified code.\n out = out(width);\n } else {\n out = new Uint8Array(width);\n (global.crypto || global.msCrypto).getRandomValues(out);\n }\n return tostring(out);\n } catch (e) {\n var browser = global.navigator,\n plugins = browser && browser.plugins;\n return [+new Date, global, plugins, global.screen, tostring(pool)];\n }\n}\n\n//\n// tostring()\n// Converts an array of charcodes to a string\n//\nfunction tostring(a) {\n return String.fromCharCode.apply(0, a);\n}\n\n//\n// When seedrandom.js is loaded, we immediately mix a few bits\n// from the built-in RNG into the entropy pool. Because we do\n// not want to interfere with deterministic PRNG state later,\n// seedrandom will not call math.random on its own again after\n// initialization.\n//\nmixkey(math.random(), pool);\n\n//\n// Nodejs and AMD support: export the implementation as a module using\n// either convention.\n//\nif ((typeof module) == 'object' && module.exports) {\n module.exports = seedrandom;\n // When in node.js, try using crypto package for autoseeding.\n try {\n nodecrypto = require('crypto');\n } catch (ex) {}\n} else if ((typeof define) == 'function' && define.amd) {\n define(function() { return seedrandom; });\n}\n\n// End anonymous scope, and pass initial values.\n})(\n [], // pool: entropy pool starts empty\n Math // math: package containing random, pow, and seedrandom\n);\n", "// A library of seedable RNGs implemented in Javascript.\n//\n// Usage:\n//\n// var seedrandom = require('seedrandom');\n// var random = seedrandom(1); // or any seed.\n// var x = random(); // 0 <= x < 1. Every bit is random.\n// var x = random.quick(); // 0 <= x < 1. 
32 bits of randomness.\n\n// alea, a 53-bit multiply-with-carry generator by Johannes Baag\u00F8e.\n// Period: ~2^116\n// Reported to pass all BigCrush tests.\nvar alea = require('./lib/alea');\n\n// xor128, a pure xor-shift generator by George Marsaglia.\n// Period: 2^128-1.\n// Reported to fail: MatrixRank and LinearComp.\nvar xor128 = require('./lib/xor128');\n\n// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.\n// Period: 2^192-2^32\n// Reported to fail: CollisionOver, SimpPoker, and LinearComp.\nvar xorwow = require('./lib/xorwow');\n\n// xorshift7, by Fran\u00E7ois Panneton and Pierre L'ecuyer, takes\n// a different approach: it adds robustness by allowing more shifts\n// than Marsaglia's original three. It is a 7-shift generator\n// with 256 bits, that passes BigCrush with no systmatic failures.\n// Period 2^256-1.\n// No systematic BigCrush failures reported.\nvar xorshift7 = require('./lib/xorshift7');\n\n// xor4096, by Richard Brent, is a 4096-bit xor-shift with a\n// very long period that also adds a Weyl generator. It also passes\n// BigCrush with no systematic failures. Its long period may\n// be useful if you have many generators and need to avoid\n// collisions.\n// Period: 2^4128-2^32.\n// No systematic BigCrush failures reported.\nvar xor4096 = require('./lib/xor4096');\n\n// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random\n// number generator derived from ChaCha, a modern stream cipher.\n// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n// Period: ~2^127\n// No systematic BigCrush failures reported.\nvar tychei = require('./lib/tychei');\n\n// The original ARC4-based prng included in this library.\n// Period: ~2^1600\nvar sr = require('./seedrandom');\n\nsr.alea = alea;\nsr.xor128 = xor128;\nsr.xorwow = xorwow;\nsr.xorshift7 = xorshift7;\nsr.xor4096 = xor4096;\nsr.tychei = tychei;\n\nmodule.exports = sr;\n", "// A port of an algorithm by Johannes Baag\u00F8e , 2010\n// http://baagoe.com/en/RandomMusings/javascript/\n// https://github.com/nquinlan/better-random-numbers-for-javascript-mirror\n// Original work is under MIT license -\n\n// Copyright (C) 2010 by Johannes Baag\u00F8e \n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\n\n\n(function(global, module, define) {\n\nfunction Alea(seed) {\n var me = this, mash = Mash();\n\n me.next = function() {\n var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32\n me.s0 = me.s1;\n me.s1 = me.s2;\n return me.s2 = t - (me.c = t | 0);\n };\n\n // Apply the seeding algorithm from Baagoe.\n me.c = 1;\n me.s0 = mash(' ');\n me.s1 = mash(' ');\n me.s2 = mash(' ');\n me.s0 -= mash(seed);\n if (me.s0 < 0) { me.s0 += 1; }\n me.s1 -= mash(seed);\n if (me.s1 < 0) { me.s1 += 1; }\n me.s2 -= mash(seed);\n if (me.s2 < 0) { me.s2 += 1; }\n mash = null;\n}\n\nfunction copy(f, t) {\n t.c = f.c;\n t.s0 = f.s0;\n t.s1 = f.s1;\n t.s2 = f.s2;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new Alea(seed),\n state = opts && opts.state,\n prng = xg.next;\n prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }\n prng.double = function() {\n return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53\n };\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nfunction Mash() {\n var n = 0xefc8249d;\n\n var mash = function(data) {\n data = String(data);\n for (var i = 0; i < data.length; i++) {\n n += data.charCodeAt(i);\n var h = 0.02519603282416938 * n;\n n = h >>> 0;\n h -= n;\n h *= n;\n n = h >>> 0;\n h -= n;\n n += h * 0x100000000; // 2^32\n }\n return (n >>> 0) * 2.3283064365386963e-10; // 2^-32\n };\n\n return mash;\n}\n\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.alea = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xor128\" prng algorithm by\n// George Marsaglia. 
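// The Alea generator just above is a multiply-with-carry recurrence on
// 32-bit fractions; a standalone toy step with the constants copied from
// the vendored code (illustration only, not a replacement for it):
function aleaStep(s) {
  var t = 2091639 * s.s0 + s.c * 2.3283064365386963e-10; // carry * 2^-32
  s.s0 = s.s1;
  s.s1 = s.s2;
  s.c = t | 0;          // the integer part becomes the new carry
  s.s2 = t - s.c;       // the fractional part, in [0, 1), is the output
  return s.s2;
}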
See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n\n // Set up generator function.\n me.next = function() {\n var t = me.x ^ (me.x << 11);\n me.x = me.y;\n me.y = me.z;\n me.z = me.w;\n return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);\n };\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor128 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorwow\" prng algorithm by\n// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var t = (me.x ^ (me.x >>> 2));\n me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v;\n return (me.d = (me.d + 362437 | 0)) +\n (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;\n };\n\n me.x = 0;\n me.y = 0;\n me.z = 0;\n me.w = 0;\n me.v = 0;\n\n if (seed === (seed | 0)) {\n // Integer seed.\n me.x = seed;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 64; k++) {\n me.x ^= strseed.charCodeAt(k) | 0;\n if (k == strseed.length) {\n me.d = me.x << 10 ^ me.x >>> 4;\n }\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.x = f.x;\n t.y = f.y;\n t.z = f.z;\n t.w = f.w;\n t.v = f.v;\n t.d = f.d;\n return t;\n}\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorwow = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "// A Javascript implementaion of the \"xorshift7\" 
algorithm by\n// Fran\u00E7ois Panneton and Pierre L'ecuyer:\n// \"On the Xorgshift Random Number Generators\"\n// http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n // Update xor generator.\n var X = me.x, i = me.i, t, v, w;\n t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24);\n t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10);\n t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3);\n t = X[(i + 4) & 7]; v ^= t ^ (t << 7);\n t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9);\n X[i] = v;\n me.i = (i + 1) & 7;\n return v;\n };\n\n function init(me, seed) {\n var j, w, X = [];\n\n if (seed === (seed | 0)) {\n // Seed state array using a 32-bit integer.\n w = X[0] = seed;\n } else {\n // Seed state using a string.\n seed = '' + seed;\n for (j = 0; j < seed.length; ++j) {\n X[j & 7] = (X[j & 7] << 15) ^\n (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);\n }\n }\n // Enforce an array length of 8, not all zeroes.\n while (X.length < 8) X.push(0);\n for (j = 0; j < 8 && X[j] === 0; ++j);\n if (j == 8) w = X[7] = -1; else w = X[j];\n\n me.x = X;\n me.i = 0;\n\n // Discard an initial 256 values.\n for (j = 256; j > 0; --j) {\n me.next();\n }\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.x = f.x.slice();\n t.i = f.i;\n return t;\n}\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.x) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xorshift7 = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n", "// A Javascript implementaion of Richard Brent's Xorgens xor4096 algorithm.\n//\n// This fast non-cryptographic random number generator is designed for\n// use in Monte-Carlo algorithms. It combines a long-period xorshift\n// generator with a Weyl generator, and it passes all common batteries\n// of stasticial tests for randomness while consuming only a few nanoseconds\n// for each prng generated. For background on the generator, see Brent's\n// paper: \"Some long-period random number generators using shifts and xors.\"\n// http://arxiv.org/pdf/1004.3115v1.pdf\n//\n// Usage:\n//\n// var xor4096 = require('xor4096');\n// random = xor4096(1); // Seed with int32 or string.\n// assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.\n// assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.\n//\n// For nonzero numeric keys, this impelementation provides a sequence\n// identical to that by Brent's xorgens 3 implementaion in C. 
This\n// implementation also provides for initalizing the generator with\n// string seeds, or for saving and restoring the state of the generator.\n//\n// On Chrome, this prng benchmarks about 2.1 times slower than\n// Javascript's built-in Math.random().\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this;\n\n // Set up generator function.\n me.next = function() {\n var w = me.w,\n X = me.X, i = me.i, t, v;\n // Update Weyl generator.\n me.w = w = (w + 0x61c88647) | 0;\n // Update xor generator.\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n // Update Xor generator array state.\n v = X[i] = v ^ t;\n me.i = i;\n // Result is the combination.\n return (v + (w ^ (w >>> 16))) | 0;\n };\n\n function init(me, seed) {\n var t, v, i, j, w, X = [], limit = 128;\n if (seed === (seed | 0)) {\n // Numeric seeds initialize v, which is used to generates X.\n v = seed;\n seed = null;\n } else {\n // String seeds are mixed into v and X one character at a time.\n seed = seed + '\\0';\n v = 0;\n limit = Math.max(limit, seed.length);\n }\n // Initialize circular array and weyl value.\n for (i = 0, j = -32; j < limit; ++j) {\n // Put the unicode characters into the array, and shuffle them.\n if (seed) v ^= seed.charCodeAt((j + 32) % seed.length);\n // After 32 shuffles, take v as the starting w value.\n if (j === 0) w = v;\n v ^= v << 10;\n v ^= v >>> 15;\n v ^= v << 4;\n v ^= v >>> 13;\n if (j >= 0) {\n w = (w + 0x61c88647) | 0; // Weyl.\n t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.\n i = (0 == t) ? i + 1 : 0; // Count zeroes.\n }\n }\n // We have detected all zeroes; make the key nonzero.\n if (i >= 128) {\n X[(seed && seed.length || 0) & 127] = -1;\n }\n // Run the generator 512 times to further mix the state before using it.\n // Factoring this as a function slows the main generator, so it is just\n // unrolled here. 
The weyl generator is not advanced while warming up.\n i = 127;\n for (j = 4 * 128; j > 0; --j) {\n v = X[(i + 34) & 127];\n t = X[i = ((i + 1) & 127)];\n v ^= v << 13;\n t ^= t << 17;\n v ^= v >>> 15;\n t ^= t >>> 12;\n X[i] = v ^ t;\n }\n // Storing state as object members is faster than using closure variables.\n me.w = w;\n me.X = X;\n me.i = i;\n }\n\n init(me, seed);\n}\n\nfunction copy(f, t) {\n t.i = f.i;\n t.w = f.w;\n t.X = f.X.slice();\n return t;\n};\n\nfunction impl(seed, opts) {\n if (seed == null) seed = +(new Date);\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (state.X) copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.xor4096 = impl;\n}\n\n})(\n this, // window object or global\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n", "// A Javascript implementaion of the \"Tyche-i\" prng algorithm by\n// Samuel Neves and Filipe Araujo.\n// See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n\n(function(global, module, define) {\n\nfunction XorGen(seed) {\n var me = this, strseed = '';\n\n // Set up generator function.\n me.next = function() {\n var b = me.b, c = me.c, d = me.d, a = me.a;\n b = (b << 25) ^ (b >>> 7) ^ c;\n c = (c - d) | 0;\n d = (d << 24) ^ (d >>> 8) ^ a;\n a = (a - b) | 0;\n me.b = b = (b << 20) ^ (b >>> 12) ^ c;\n me.c = c = (c - d) | 0;\n me.d = (d << 16) ^ (c >>> 16) ^ a;\n return me.a = (a - b) | 0;\n };\n\n /* The following is non-inverted tyche, which has better internal\n * bit diffusion, but which is about 25% slower than tyche-i in JS.\n me.next = function() {\n var a = me.a, b = me.b, c = me.c, d = me.d;\n a = (me.a + me.b | 0) >>> 0;\n d = me.d ^ a; d = d << 16 ^ d >>> 16;\n c = me.c + d | 0;\n b = me.b ^ c; b = b << 12 ^ d >>> 20;\n me.a = a = a + b | 0;\n d = d ^ a; me.d = d = d << 8 ^ d >>> 24;\n me.c = c = c + d | 0;\n b = b ^ c;\n return me.b = (b << 7 ^ b >>> 25);\n }\n */\n\n me.a = 0;\n me.b = 0;\n me.c = 2654435769 | 0;\n me.d = 1367130551;\n\n if (seed === Math.floor(seed)) {\n // Integer seed.\n me.a = (seed / 0x100000000) | 0;\n me.b = seed | 0;\n } else {\n // String seed.\n strseed += seed;\n }\n\n // Mix in string seed, then discard an initial batch of 64 values.\n for (var k = 0; k < strseed.length + 20; k++) {\n me.b ^= strseed.charCodeAt(k) | 0;\n me.next();\n }\n}\n\nfunction copy(f, t) {\n t.a = f.a;\n t.b = f.b;\n t.c = f.c;\n t.d = f.d;\n return t;\n};\n\nfunction impl(seed, opts) {\n var xg = new XorGen(seed),\n state = opts && opts.state,\n prng = function() { return (xg.next() >>> 0) / 0x100000000; };\n prng.double = function() {\n do {\n var top = xg.next() >>> 11,\n bot = (xg.next() >>> 0) / 0x100000000,\n result = (top + bot) / (1 << 21);\n } while (result === 0);\n return result;\n };\n prng.int32 = xg.next;\n prng.quick = prng;\n if (state) {\n if (typeof(state) == 'object') copy(state, xg);\n prng.state = function() { return copy(xg, {}); }\n }\n return prng;\n}\n\nif (module && module.exports) {\n 
module.exports = impl;\n} else if (define && define.amd) {\n define(function() { return impl; });\n} else {\n this.tychei = impl;\n}\n\n})(\n this,\n (typeof module) == 'object' && module, // present in node.js\n (typeof define) == 'function' && define // present with an AMD loader\n);\n\n\n", "/*\nCopyright 2019 David Bau.\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n*/\n\n(function (global, pool, math) {\n//\n// The following constants are related to IEEE 754 limits.\n//\n\nvar width = 256, // each RC4 output is 0 <= x < 256\n chunks = 6, // at least six RC4 outputs for each double\n digits = 52, // there are 52 significant digits in a double\n rngname = 'random', // rngname: name for Math.random and Math.seedrandom\n startdenom = math.pow(width, chunks),\n significance = math.pow(2, digits),\n overflow = significance * 2,\n mask = width - 1,\n nodecrypto; // node.js crypto module, initialized at the bottom.\n\n//\n// seedrandom()\n// This is the seedrandom function described above.\n//\nfunction seedrandom(seed, options, callback) {\n var key = [];\n options = (options == true) ? { entropy: true } : (options || {});\n\n // Flatten the seed string or build one from local entropy if needed.\n var shortseed = mixkey(flatten(\n options.entropy ? [seed, tostring(pool)] :\n (seed == null) ? 
autoseed() : seed, 3), key);\n\n // Use the seed to initialize an ARC4 generator.\n var arc4 = new ARC4(key);\n\n // This function returns a random double in [0, 1) that contains\n // randomness in every bit of the mantissa of the IEEE 754 value.\n var prng = function() {\n var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48\n d = startdenom, // and denominator d = 2 ^ 48.\n x = 0; // and no 'extra last byte'.\n while (n < significance) { // Fill up all significant digits by\n n = (n + x) * width; // shifting numerator and\n d *= width; // denominator and generating a\n x = arc4.g(1); // new least-significant-byte.\n }\n while (n >= overflow) { // To avoid rounding up, before adding\n n /= 2; // last byte, shift everything\n d /= 2; // right using integer math until\n x >>>= 1; // we have exactly the desired bits.\n }\n return (n + x) / d; // Form the number within [0, 1).\n };\n\n prng.int32 = function() { return arc4.g(4) | 0; }\n prng.quick = function() { return arc4.g(4) / 0x100000000; }\n prng.double = prng;\n\n // Mix the randomness into accumulated entropy.\n mixkey(tostring(arc4.S), pool);\n\n // Calling convention: what to return as a function of prng, seed, is_math.\n return (options.pass || callback ||\n function(prng, seed, is_math_call, state) {\n if (state) {\n // Load the arc4 state from the given state if it has an S array.\n if (state.S) { copy(state, arc4); }\n // Only provide the .state method if requested via options.state.\n prng.state = function() { return copy(arc4, {}); }\n }\n\n // If called as a method of Math (Math.seedrandom()), mutate\n // Math.random because that is how seedrandom.js has worked since v1.0.\n if (is_math_call) { math[rngname] = prng; return seed; }\n\n // Otherwise, it is a newer calling convention, so return the\n // prng directly.\n else return prng;\n })(\n prng,\n shortseed,\n 'global' in options ? options.global : (this == math),\n options.state);\n}\n\n//\n// ARC4\n//\n// An ARC4 implementation. The constructor takes a key in the form of\n// an array of at most (width) integers that should be 0 <= x < (width).\n//\n// The g(count) method returns a pseudorandom integer that concatenates\n// the next (count) outputs from ARC4. Its return value is a number x\n// that is in the range 0 <= x < (width ^ count).\n//\nfunction ARC4(key) {\n var t, keylen = key.length,\n me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];\n\n // The empty key [] is treated as [0].\n if (!keylen) { key = [keylen++]; }\n\n // Set up S using the standard key scheduling algorithm.\n while (i < width) {\n s[i] = i++;\n }\n for (i = 0; i < width; i++) {\n s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];\n s[j] = t;\n }\n\n // The \"g\" method returns the next (count) outputs as one number.\n (me.g = function(count) {\n // Using instance members instead of closure state nearly doubles speed.\n var t, r = 0,\n i = me.i, j = me.j, s = me.S;\n while (count--) {\n t = s[i = mask & (i + 1)];\n r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];\n }\n me.i = i; me.j = j;\n return r;\n // For robust unpredictability, the function call below automatically\n // discards an initial batch of values. 
This is called RC4-drop[256].\n // See http://google.com/search?q=rsa+fluhrer+response&btnI\n })(width);\n}\n\n//\n// copy()\n// Copies internal state of ARC4 to or from a plain object.\n//\nfunction copy(f, t) {\n t.i = f.i;\n t.j = f.j;\n t.S = f.S.slice();\n return t;\n};\n\n//\n// flatten()\n// Converts an object tree to nested arrays of strings.\n//\nfunction flatten(obj, depth) {\n var result = [], typ = (typeof obj), prop;\n if (depth && typ == 'object') {\n for (prop in obj) {\n try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}\n }\n }\n return (result.length ? result : typ == 'string' ? obj : obj + '\\0');\n}\n\n//\n// mixkey()\n// Mixes a string seed into a key that is an array of integers, and\n// returns a shortened string seed that is equivalent to the result key.\n//\nfunction mixkey(seed, key) {\n var stringseed = seed + '', smear, j = 0;\n while (j < stringseed.length) {\n key[mask & j] =\n mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));\n }\n return tostring(key);\n}\n\n//\n// autoseed()\n// Returns an object for autoseeding, using window.crypto and Node crypto\n// module if available.\n//\nfunction autoseed() {\n try {\n var out;\n if (nodecrypto && (out = nodecrypto.randomBytes)) {\n // The use of 'out' to remember randomBytes makes tight minified code.\n out = out(width);\n } else {\n out = new Uint8Array(width);\n (global.crypto || global.msCrypto).getRandomValues(out);\n }\n return tostring(out);\n } catch (e) {\n var browser = global.navigator,\n plugins = browser && browser.plugins;\n return [+new Date, global, plugins, global.screen, tostring(pool)];\n }\n}\n\n//\n// tostring()\n// Converts an array of charcodes to a string\n//\nfunction tostring(a) {\n return String.fromCharCode.apply(0, a);\n}\n\n//\n// When seedrandom.js is loaded, we immediately mix a few bits\n// from the built-in RNG into the entropy pool. Because we do\n// not want to interfere with deterministic PRNG state later,\n// seedrandom will not call math.random on its own again after\n// initialization.\n//\nmixkey(math.random(), pool);\n\n//\n// Nodejs and AMD support: export the implementation as a module using\n// either convention.\n//\nif ((typeof module) == 'object' && module.exports) {\n module.exports = seedrandom;\n // When in node.js, try using crypto package for autoseeding.\n try {\n nodecrypto = require('crypto');\n } catch (ex) {}\n} else if ((typeof define) == 'function' && define.amd) {\n define(function() { return seedrandom; });\n} else {\n // When included as a plain script, set up Math.seedrandom global.\n math['seed' + rngname] = seedrandom;\n}\n\n\n// End anonymous scope, and pass initial values.\n})(\n // global: `self` in browsers (including strict mode and web workers),\n // otherwise `this` in Node and other environments\n (typeof self !== 'undefined') ? self : this,\n [], // pool: entropy pool starts empty\n Math // math: package containing random, pow, and seedrandom\n);\n", "// A library of seedable RNGs implemented in Javascript.\n//\n// Usage:\n//\n// var seedrandom = require('seedrandom');\n// var random = seedrandom(1); // or any seed.\n// var x = random(); // 0 <= x < 1. Every bit is random.\n// var x = random.quick(); // 0 <= x < 1. 
32 bits of randomness.\n\n// alea, a 53-bit multiply-with-carry generator by Johannes Baag\u00F8e.\n// Period: ~2^116\n// Reported to pass all BigCrush tests.\nvar alea = require('./lib/alea');\n\n// xor128, a pure xor-shift generator by George Marsaglia.\n// Period: 2^128-1.\n// Reported to fail: MatrixRank and LinearComp.\nvar xor128 = require('./lib/xor128');\n\n// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.\n// Period: 2^192-2^32\n// Reported to fail: CollisionOver, SimpPoker, and LinearComp.\nvar xorwow = require('./lib/xorwow');\n\n// xorshift7, by Fran\u00E7ois Panneton and Pierre L'ecuyer, takes\n// a different approach: it adds robustness by allowing more shifts\n// than Marsaglia's original three. It is a 7-shift generator\n// with 256 bits, that passes BigCrush with no systmatic failures.\n// Period 2^256-1.\n// No systematic BigCrush failures reported.\nvar xorshift7 = require('./lib/xorshift7');\n\n// xor4096, by Richard Brent, is a 4096-bit xor-shift with a\n// very long period that also adds a Weyl generator. It also passes\n// BigCrush with no systematic failures. Its long period may\n// be useful if you have many generators and need to avoid\n// collisions.\n// Period: 2^4128-2^32.\n// No systematic BigCrush failures reported.\nvar xor4096 = require('./lib/xor4096');\n\n// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random\n// number generator derived from ChaCha, a modern stream cipher.\n// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf\n// Period: ~2^127\n// No systematic BigCrush failures reported.\nvar tychei = require('./lib/tychei');\n\n// The original ARC4-based prng included in this library.\n// Period: ~2^1600\nvar sr = require('./seedrandom');\n\nsr.alea = alea;\nsr.xor128 = xor128;\nsr.xorwow = xorwow;\nsr.xorshift7 = xorshift7;\nsr.xor4096 = xor4096;\nsr.tychei = tychei;\n\nmodule.exports = sr;\n", "", "", "", "", "\nvar WasmBackendModuleThreadedSimd = (function() {\n var _scriptDir = typeof document !== 'undefined' && document.currentScript ? 
document.currentScript.src : undefined;\n if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;\n return (\nfunction(WasmBackendModuleThreadedSimd) {\n WasmBackendModuleThreadedSimd = WasmBackendModuleThreadedSimd || {};\n\nfunction GROWABLE_HEAP_I8(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAP8}function GROWABLE_HEAP_U8(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPU8}function GROWABLE_HEAP_I32(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAP32}function GROWABLE_HEAP_U32(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPU32}function GROWABLE_HEAP_F64(){if(wasmMemory.buffer!=buffer){updateGlobalBufferAndViews(wasmMemory.buffer)}return HEAPF64}var Module=typeof WasmBackendModuleThreadedSimd!==\"undefined\"?WasmBackendModuleThreadedSimd:{};var readyPromiseResolve,readyPromiseReject;Module[\"ready\"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram=\"./this.program\";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window===\"object\";ENVIRONMENT_IS_WORKER=typeof importScripts===\"function\";ENVIRONMENT_IS_NODE=typeof process===\"object\"&&typeof process.versions===\"object\"&&typeof process.versions.node===\"string\";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var ENVIRONMENT_IS_PTHREAD=Module[\"ENVIRONMENT_IS_PTHREAD\"]||false;if(ENVIRONMENT_IS_PTHREAD){buffer=Module[\"buffer\"]}var scriptDirectory=\"\";function locateFile(path){if(Module[\"locateFile\"]){return Module[\"locateFile\"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require(\"path\").dirname(scriptDirectory)+\"/\"}else{scriptDirectory=__dirname+\"/\"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process[\"argv\"].length>1){thisProgram=process[\"argv\"][1].replace(/\\\\/g,\"/\")}arguments_=process[\"argv\"].slice(2);process[\"on\"](\"uncaughtException\",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process[\"on\"](\"unhandledRejection\",abort);quit_=function(status){process[\"exit\"](status)};Module[\"inspect\"]=function(){return\"[Emscripten Module object]\"};var nodeWorkerThreads;try{nodeWorkerThreads=require(\"worker_threads\")}catch(e){console.error('The \"worker_threads\" module is not supported in this node.js build - perhaps a newer version is needed?');throw e}global.Worker=nodeWorkerThreads.Worker}else if(ENVIRONMENT_IS_SHELL){if(typeof read!=\"undefined\"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer===\"function\"){return new Uint8Array(readbuffer(f))}data=read(f,\"binary\");assert(typeof data===\"object\");return 
data};if(typeof scriptArgs!=\"undefined\"){arguments_=scriptArgs}else if(typeof arguments!=\"undefined\"){arguments_=arguments}if(typeof quit===\"function\"){quit_=function(status){quit(status)}}if(typeof print!==\"undefined\"){if(typeof console===\"undefined\")console={};console.log=print;console.warn=console.error=typeof printErr!==\"undefined\"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof document!==\"undefined\"&&document.currentScript){scriptDirectory=document.currentScript.src}if(typeof _scriptDir !== \"undefined\" && _scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf(\"blob:\")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf(\"/\")+1)}else{scriptDirectory=\"\"}if(ENVIRONMENT_IS_NODE){read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret}}else{read_=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.responseType=\"arraybuffer\";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,true);xhr.responseType=\"arraybuffer\";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}if(ENVIRONMENT_IS_NODE){if(typeof performance===\"undefined\"){global.performance=require(\"perf_hooks\").performance}}var out=Module[\"print\"]||console.log.bind(console);var err=Module[\"printErr\"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module[\"arguments\"])arguments_=Module[\"arguments\"];if(Module[\"thisProgram\"])thisProgram=Module[\"thisProgram\"];if(Module[\"quit\"])quit_=Module[\"quit\"];var Atomics_load=Atomics.load;var Atomics_store=Atomics.store;var Atomics_compareExchange=Atomics.compareExchange;var wasmBinary;if(Module[\"wasmBinary\"])wasmBinary=Module[\"wasmBinary\"];var noExitRuntime=Module[\"noExitRuntime\"]||true;if(typeof WebAssembly!==\"object\"){abort(\"no native wasm support detected\")}var wasmMemory;var wasmModule;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort(\"Assertion failed: \"+text)}}function getCFunc(ident){var func=Module[\"_\"+ident];assert(func,\"Cannot call unknown function \"+ident+\", make sure it is exported\");return func}function ccall(ident,returnType,argTypes,args,opts){var toC={\"string\":function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){var len=(str.length<<2)+1;ret=stackAlloc(len);stringToUTF8(str,ret,len)}return ret},\"array\":function(arr){var ret=stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}};function convertReturnValue(ret){if(returnType===\"string\")return UTF8ToString(ret);if(returnType===\"boolean\")return Boolean(ret);return ret}var func=getCFunc(ident);var cArgs=[];var stack=0;if(args){for(var i=0;i=endIdx)){var u0=heap[idx++];if(!u0)return 
str;if(!(u0&128)){str+=String.fromCharCode(u0);continue}var u1=heap[idx++]&63;if((u0&224)==192){str+=String.fromCharCode((u0&31)<<6|u1);continue}var u2=heap[idx++]&63;if((u0&240)==224){u0=(u0&15)<<12|u1<<6|u2}else{u0=(u0&7)<<18|u1<<12|u2<<6|heap[idx++]&63}if(u0<65536){str+=String.fromCharCode(u0)}else{var ch=u0-65536;str+=String.fromCharCode(55296|ch>>10,56320|ch&1023)}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(GROWABLE_HEAP_U8(),ptr,maxBytesToRead):\"\"}function stringToUTF8Array(str,heap,outIdx,maxBytesToWrite){if(!(maxBytesToWrite>0))return 0;var startIdx=outIdx;var endIdx=outIdx+maxBytesToWrite-1;for(var i=0;i=55296&&u<=57343){var u1=str.charCodeAt(++i);u=65536+((u&1023)<<10)|u1&1023}if(u<=127){if(outIdx>=endIdx)break;heap[outIdx++]=u}else if(u<=2047){if(outIdx+1>=endIdx)break;heap[outIdx++]=192|u>>6;heap[outIdx++]=128|u&63}else if(u<=65535){if(outIdx+2>=endIdx)break;heap[outIdx++]=224|u>>12;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}else{if(outIdx+3>=endIdx)break;heap[outIdx++]=240|u>>18;heap[outIdx++]=128|u>>12&63;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}}heap[outIdx]=0;return outIdx-startIdx}function stringToUTF8(str,outPtr,maxBytesToWrite){return stringToUTF8Array(str,GROWABLE_HEAP_U8(),outPtr,maxBytesToWrite)}function lengthBytesUTF8(str){var len=0;for(var i=0;i=55296&&u<=57343)u=65536+((u&1023)<<10)|str.charCodeAt(++i)&1023;if(u<=127)++len;else if(u<=2047)len+=2;else if(u<=65535)len+=3;else len+=4}return len}function writeArrayToMemory(array,buffer){GROWABLE_HEAP_I8().set(array,buffer)}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module[\"HEAP8\"]=HEAP8=new Int8Array(buf);Module[\"HEAP16\"]=HEAP16=new Int16Array(buf);Module[\"HEAP32\"]=HEAP32=new Int32Array(buf);Module[\"HEAPU8\"]=HEAPU8=new Uint8Array(buf);Module[\"HEAPU16\"]=HEAPU16=new Uint16Array(buf);Module[\"HEAPU32\"]=HEAPU32=new Uint32Array(buf);Module[\"HEAPF32\"]=HEAPF32=new Float32Array(buf);Module[\"HEAPF64\"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module[\"INITIAL_MEMORY\"]||16777216;if(ENVIRONMENT_IS_PTHREAD){wasmMemory=Module[\"wasmMemory\"];buffer=Module[\"buffer\"]}else{if(Module[\"wasmMemory\"]){wasmMemory=Module[\"wasmMemory\"]}else{wasmMemory=new WebAssembly.Memory({\"initial\":INITIAL_MEMORY/65536,\"maximum\":2147483648/65536,\"shared\":true});if(!(wasmMemory.buffer instanceof SharedArrayBuffer)){err(\"requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag\");if(ENVIRONMENT_IS_NODE){console.log(\"(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)\")}throw Error(\"bad memory\")}}}if(wasmMemory){buffer=wasmMemory.buffer}INITIAL_MEMORY=buffer.byteLength;updateGlobalBufferAndViews(buffer);var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATEXIT__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;var runtimeExited=false;if(!ENVIRONMENT_IS_PTHREAD)__ATINIT__.push({func:function(){___wasm_call_ctors()}});function preRun(){if(ENVIRONMENT_IS_PTHREAD)return;if(Module[\"preRun\"]){if(typeof 
Module[\"preRun\"]==\"function\")Module[\"preRun\"]=[Module[\"preRun\"]];while(Module[\"preRun\"].length){addOnPreRun(Module[\"preRun\"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;if(ENVIRONMENT_IS_PTHREAD)return;callRuntimeCallbacks(__ATINIT__)}function preMain(){if(ENVIRONMENT_IS_PTHREAD)return;callRuntimeCallbacks(__ATMAIN__)}function exitRuntime(){if(ENVIRONMENT_IS_PTHREAD)return;runtimeExited=true}function postRun(){if(ENVIRONMENT_IS_PTHREAD)return;if(Module[\"postRun\"]){if(typeof Module[\"postRun\"]==\"function\")Module[\"postRun\"]=[Module[\"postRun\"]];while(Module[\"postRun\"].length){addOnPostRun(Module[\"postRun\"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){assert(!ENVIRONMENT_IS_PTHREAD,\"addRunDependency cannot be used in a pthread worker\");runDependencies++;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module[\"preloadedImages\"]={};Module[\"preloadedAudios\"]={};function abort(what){if(Module[\"onAbort\"]){Module[\"onAbort\"](what)}if(ENVIRONMENT_IS_PTHREAD)console.error(\"Pthread aborting at \"+(new Error).stack);what+=\"\";err(what);ABORT=true;EXITSTATUS=1;what=\"abort(\"+what+\"). 
Build with -s ASSERTIONS=1 for more info.\";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix=\"data:application/octet-stream;base64,\";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix=\"file://\";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile=\"tfjs-backend-wasm-threaded-simd.wasm\";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw\"both async and sync fetching of the wasm failed\"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch===\"function\"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){if(!response[\"ok\"]){throw\"failed to load wasm binary file at '\"+wasmBinaryFile+\"'\"}return response[\"arrayBuffer\"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={\"a\":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module[\"asm\"]=exports;wasmTable=Module[\"asm\"][\"F\"];wasmModule=module;if(!ENVIRONMENT_IS_PTHREAD){var numWorkersToLoad=PThread.unusedWorkers.length;PThread.unusedWorkers.forEach(function(w){PThread.loadWasmModuleToWorker(w,function(){if(!--numWorkersToLoad)removeRunDependency(\"wasm-instantiate\")})})}}if(!ENVIRONMENT_IS_PTHREAD){addRunDependency(\"wasm-instantiate\")}function receiveInstantiatedSource(output){receiveInstance(output[\"instance\"],output[\"module\"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){return WebAssembly.instantiate(binary,info)}).then(receiver,function(reason){err(\"failed to asynchronously prepare wasm: \"+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming===\"function\"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch===\"function\"){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return result.then(receiveInstantiatedSource,function(reason){err(\"wasm streaming compile failed: \"+reason);err(\"falling back to ArrayBuffer instantiation\");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module[\"instantiateWasm\"]){try{var exports=Module[\"instantiateWasm\"](info,receiveInstance);return exports}catch(e){err(\"Module.instantiateWasm callback failed with error: \"+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}var ASM_CONSTS={9816:function(){throw\"Canceled!\"},9834:function($0,$1){setTimeout(function(){__emscripten_do_dispatch_to_thread($0,$1)},0)}};function initPthreadsJS(){PThread.initRuntime()}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback==\"function\"){callback(Module);continue}var func=callback.func;if(typeof 
func===\"number\"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}function _emscripten_futex_wake(addr,count){if(addr<=0||addr>GROWABLE_HEAP_I8().length||addr&3!=0||count<0)return-28;if(count==0)return 0;if(count>=2147483647)count=Infinity;var mainThreadWaitAddress=Atomics.load(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2);var mainThreadWoken=0;if(mainThreadWaitAddress==addr){var loadedAddr=Atomics.compareExchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,mainThreadWaitAddress,0);if(loadedAddr==mainThreadWaitAddress){--count;mainThreadWoken=1;if(count<=0)return 1}}var ret=Atomics.notify(GROWABLE_HEAP_I32(),addr>>2,count);if(ret>=0)return ret+mainThreadWoken;throw\"Atomics.notify returned an unexpected value \"+ret}Module[\"_emscripten_futex_wake\"]=_emscripten_futex_wake;function killThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! killThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! Null pthread_ptr in killThread!\";GROWABLE_HEAP_I32()[pthread_ptr+12>>2]=0;var pthread=PThread.pthreads[pthread_ptr];pthread.worker.terminate();PThread.freeThreadData(pthread);PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(pthread.worker),1);pthread.worker.pthread=undefined}function cancelThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! cancelThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! Null pthread_ptr in cancelThread!\";var pthread=PThread.pthreads[pthread_ptr];pthread.worker.postMessage({\"cmd\":\"cancel\"})}function cleanupThread(pthread_ptr){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! cleanupThread() can only ever be called from main application thread!\";if(!pthread_ptr)throw\"Internal Error! 
Null pthread_ptr in cleanupThread!\";var pthread=PThread.pthreads[pthread_ptr];if(pthread){GROWABLE_HEAP_I32()[pthread_ptr+12>>2]=0;var worker=pthread.worker;PThread.returnWorkerToPool(worker)}}var PThread={unusedWorkers:[],runningWorkers:[],initMainThreadBlock:function(){var pthreadPoolSize=Math.min(4,Math.max(1,(navigator.hardwareConcurrency||1)/2));for(var i=0;i>2]=tb;var headPtr=tb+152;GROWABLE_HEAP_I32()[headPtr>>2]=headPtr;var tlsMemory=_malloc(512);for(var i=0;i<128;++i)GROWABLE_HEAP_U32()[tlsMemory/4+i]=0;Atomics.store(GROWABLE_HEAP_U32(),tb+100>>2,tlsMemory);Atomics.store(GROWABLE_HEAP_U32(),tb+40>>2,tb);__emscripten_thread_init(tb,!ENVIRONMENT_IS_WORKER,1);_emscripten_register_main_browser_thread_id(tb)},initWorker:function(){},pthreads:{},threadExitHandlers:[],setThreadStatus:function(){},runExitHandlers:function(){while(PThread.threadExitHandlers.length>0){PThread.threadExitHandlers.pop()()}if(ENVIRONMENT_IS_PTHREAD&&_pthread_self())___pthread_tsd_run_dtors()},runExitHandlersAndDeinitThread:function(tb,exitCode){Atomics.store(GROWABLE_HEAP_U32(),tb+56>>2,1);Atomics.store(GROWABLE_HEAP_U32(),tb+60>>2,0);PThread.runExitHandlers();Atomics.store(GROWABLE_HEAP_U32(),tb+4>>2,exitCode);Atomics.store(GROWABLE_HEAP_U32(),tb+0>>2,1);_emscripten_futex_wake(tb+0,2147483647);__emscripten_thread_init(0,0,0)},threadExit:function(exitCode){var tb=_pthread_self();if(tb){PThread.runExitHandlersAndDeinitThread(tb,exitCode);if(ENVIRONMENT_IS_PTHREAD){postMessage({\"cmd\":\"exit\"})}}},threadCancel:function(){PThread.runExitHandlersAndDeinitThread(_pthread_self(),-1);postMessage({\"cmd\":\"cancelDone\"})},terminateAllThreads:function(){for(var t in PThread.pthreads){var pthread=PThread.pthreads[t];if(pthread&&pthread.worker){PThread.returnWorkerToPool(pthread.worker)}}PThread.pthreads={};for(var i=0;i>2];GROWABLE_HEAP_I32()[pthread.threadInfoStruct+100>>2]=0;_free(tlsMemory);_free(pthread.threadInfoStruct)}pthread.threadInfoStruct=0;if(pthread.allocatedOwnStack&&pthread.stackBase)_free(pthread.stackBase);pthread.stackBase=0;if(pthread.worker)pthread.worker.pthread=null},returnWorkerToPool:function(worker){PThread.runWithoutMainThreadQueuedCalls(function(){delete PThread.pthreads[worker.pthread.threadInfoStruct];PThread.unusedWorkers.push(worker);PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(worker),1);PThread.freeThreadData(worker.pthread);worker.pthread=undefined})},runWithoutMainThreadQueuedCalls:function(func){GROWABLE_HEAP_I32()[__emscripten_allow_main_runtime_queued_calls>>2]=0;try{func()}finally{GROWABLE_HEAP_I32()[__emscripten_allow_main_runtime_queued_calls>>2]=1}},receiveObjectTransfer:function(data){},loadWasmModuleToWorker:function(worker,onFinishedLoading){worker.onmessage=function(e){var d=e[\"data\"];var cmd=d[\"cmd\"];if(worker.pthread)PThread.currentProxiedOperationCallerThread=worker.pthread.threadInfoStruct;if(d[\"targetThread\"]&&d[\"targetThread\"]!=_pthread_self()){var thread=PThread.pthreads[d.targetThread];if(thread){thread.worker.postMessage(e.data,d[\"transferList\"])}else{console.error('Internal error! 
Worker sent a message \"'+cmd+'\" to target pthread '+d[\"targetThread\"]+\", but that thread no longer exists!\")}PThread.currentProxiedOperationCallerThread=undefined;return}if(cmd===\"processQueuedMainThreadWork\"){_emscripten_main_thread_process_queued_calls()}else if(cmd===\"spawnThread\"){spawnThread(e.data)}else if(cmd===\"cleanupThread\"){cleanupThread(d[\"thread\"])}else if(cmd===\"killThread\"){killThread(d[\"thread\"])}else if(cmd===\"cancelThread\"){cancelThread(d[\"thread\"])}else if(cmd===\"loaded\"){worker.loaded=true;if(onFinishedLoading)onFinishedLoading(worker);if(worker.runPthread){worker.runPthread();delete worker.runPthread}}else if(cmd===\"print\"){out(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"printErr\"){err(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"alert\"){alert(\"Thread \"+d[\"threadId\"]+\": \"+d[\"text\"])}else if(cmd===\"exit\"){var detached=worker.pthread&&Atomics.load(GROWABLE_HEAP_U32(),worker.pthread.threadInfoStruct+64>>2);if(detached){PThread.returnWorkerToPool(worker)}}else if(cmd===\"exitProcess\"){try{exit(d[\"returnCode\"])}catch(e){if(e instanceof ExitStatus)return;throw e}}else if(cmd===\"cancelDone\"){PThread.returnWorkerToPool(worker)}else if(cmd===\"objectTransfer\"){PThread.receiveObjectTransfer(e.data)}else if(e.data.target===\"setimmediate\"){worker.postMessage(e.data)}else{err(\"worker sent an unknown command \"+cmd)}PThread.currentProxiedOperationCallerThread=undefined};worker.onerror=function(e){err(\"pthread sent an error! \"+e.filename+\":\"+e.lineno+\": \"+e.message)};if(ENVIRONMENT_IS_NODE){worker.on(\"message\",function(data){worker.onmessage({data:data})});worker.on(\"error\",function(data){worker.onerror(data)});worker.on(\"exit\",function(data){})}worker.postMessage({\"cmd\":\"load\",\"urlOrBlob\":Module[\"mainScriptUrlOrBlob\"]||_scriptDir,\"wasmMemory\":wasmMemory,\"wasmModule\":wasmModule})},allocateUnusedWorker:function(){var pthreadMainJs=locateFile(\"tfjs-backend-wasm-threaded-simd.worker.js\");PThread.unusedWorkers.push(new Worker(pthreadMainJs))},getNewWorker:function(){if(PThread.unusedWorkers.length==0){PThread.allocateUnusedWorker();PThread.loadWasmModuleToWorker(PThread.unusedWorkers[0])}if(PThread.unusedWorkers.length>0)return PThread.unusedWorkers.pop();else return null},busySpinWait:function(msecs){var t=performance.now()+msecs;while(performance.now()>2]=value;return value}function _atexit(func,arg){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(1,1,func,arg)}function __emscripten_notify_thread_queue(targetThreadId,mainThreadId){if(targetThreadId==mainThreadId){postMessage({\"cmd\":\"processQueuedMainThreadWork\"})}else if(ENVIRONMENT_IS_PTHREAD){postMessage({\"targetThread\":targetThreadId,\"cmd\":\"processThreadQueue\"})}else{var pthread=PThread.pthreads[targetThreadId];var worker=pthread&&pthread.worker;if(!worker){return}worker.postMessage({\"cmd\":\"processThreadQueue\"})}return 1}function _abort(){abort()}function _emscripten_asm_const_int(code,sigPtr,argbuf){var args=readAsmConstArgs(sigPtr,argbuf);return ASM_CONSTS[code].apply(null,args)}function _emscripten_conditional_set_current_thread_status(expectedStatus,newStatus){}function _emscripten_futex_wait(addr,val,timeout){if(addr<=0||addr>GROWABLE_HEAP_I8().length||addr&3!=0)return-28;if(!ENVIRONMENT_IS_WEB){var ret=Atomics.wait(GROWABLE_HEAP_I32(),addr>>2,val,timeout);if(ret===\"timed-out\")return-73;if(ret===\"not-equal\")return-6;if(ret===\"ok\")return 0;throw\"Atomics.wait returned an 
unexpected value \"+ret}else{if(Atomics.load(GROWABLE_HEAP_I32(),addr>>2)!=val){return-6}var tNow=performance.now();var tEnd=tNow+timeout;var lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,addr);while(1){tNow=performance.now();if(tNow>tEnd){lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,0);return-73}lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,0);if(lastAddr==0){break}_emscripten_main_thread_process_queued_calls();if(Atomics.load(GROWABLE_HEAP_I32(),addr>>2)!=val){return-6}lastAddr=Atomics.exchange(GROWABLE_HEAP_I32(),__emscripten_main_thread_futex>>2,addr)}return 0}}function _emscripten_memcpy_big(dest,src,num){GROWABLE_HEAP_U8().copyWithin(dest,src,src+num)}function _emscripten_num_logical_cores(){if(ENVIRONMENT_IS_NODE)return require(\"os\").cpus().length;return navigator[\"hardwareConcurrency\"]}function _emscripten_proxy_to_main_thread_js(index,sync){var numCallArgs=arguments.length-2;var stack=stackSave();var serializedNumCallArgs=numCallArgs;var args=stackAlloc(serializedNumCallArgs*8);var b=args>>3;for(var i=0;i>=2;while(ch=GROWABLE_HEAP_U8()[sigPtr++]){var double=ch<105;if(double&&buf&1)buf++;readAsmConstArgsArray.push(double?GROWABLE_HEAP_F64()[buf++>>1]:GROWABLE_HEAP_I32()[buf]);++buf}return readAsmConstArgsArray}function _emscripten_receive_on_main_thread_js(index,numCallArgs,args){_emscripten_receive_on_main_thread_js_callArgs.length=numCallArgs;var b=args>>3;for(var i=0;i>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=_emscripten_get_heap_size();if(requestedSize<=oldSize){return false}var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}var JSEvents={inEventHandler:0,removeAllEventListeners:function(){for(var i=JSEvents.eventHandlers.length-1;i>=0;--i){JSEvents._removeHandler(i)}JSEvents.eventHandlers=[];JSEvents.deferredCalls=[]},registerRemoveEventListeners:function(){if(!JSEvents.removeEventListenersRegistered){__ATEXIT__.push(JSEvents.removeAllEventListeners);JSEvents.removeEventListenersRegistered=true}},deferredCalls:[],deferCall:function(targetFunction,precedence,argsList){function arraysHaveEqualContent(arrA,arrB){if(arrA.length!=arrB.length)return false;for(var i in arrA){if(arrA[i]!=arrB[i])return false}return true}for(var i in JSEvents.deferredCalls){var call=JSEvents.deferredCalls[i];if(call.targetFunction==targetFunction&&arraysHaveEqualContent(call.argsList,argsList)){return}}JSEvents.deferredCalls.push({targetFunction:targetFunction,precedence:precedence,argsList:argsList});JSEvents.deferredCalls.sort(function(x,y){return x.precedence>2]=eventTypeId;GROWABLE_HEAP_I32()[varargs+4>>2]=eventData;GROWABLE_HEAP_I32()[varargs+8>>2]=userData;__emscripten_call_on_thread(0,targetThread,637534208,eventHandlerFunc,eventData,varargs);stackRestore(stackTop)},getTargetThreadForEventCallback:function(targetThread){switch(targetThread){case 1:return 0;case 2:return PThread.currentProxiedOperationCallerThread;default:return 
targetThread}},getNodeNameForTarget:function(target){if(!target)return\"\";if(target==window)return\"#window\";if(target==screen)return\"#screen\";return target&&target.nodeName?target.nodeName:\"\"},fullscreenEnabled:function(){return document.fullscreenEnabled||document.webkitFullscreenEnabled}};function stringToNewUTF8(jsString){var length=lengthBytesUTF8(jsString)+1;var cString=_malloc(length);stringToUTF8(jsString,cString,length);return cString}function _emscripten_set_offscreencanvas_size_on_target_thread_js(targetThread,targetCanvas,width,height){var stackTop=stackSave();var varargs=stackAlloc(12);var targetCanvasPtr=0;if(targetCanvas){targetCanvasPtr=stringToNewUTF8(targetCanvas)}GROWABLE_HEAP_I32()[varargs>>2]=targetCanvasPtr;GROWABLE_HEAP_I32()[varargs+4>>2]=width;GROWABLE_HEAP_I32()[varargs+8>>2]=height;__emscripten_call_on_thread(0,targetThread,657457152,0,targetCanvasPtr,varargs);stackRestore(stackTop)}function _emscripten_set_offscreencanvas_size_on_target_thread(targetThread,targetCanvas,width,height){targetCanvas=targetCanvas?UTF8ToString(targetCanvas):\"\";_emscripten_set_offscreencanvas_size_on_target_thread_js(targetThread,targetCanvas,width,height)}function maybeCStringToJsString(cString){return cString>2?UTF8ToString(cString):cString}var specialHTMLTargets=[0,typeof document!==\"undefined\"?document:0,typeof window!==\"undefined\"?window:0];function findEventTarget(target){target=maybeCStringToJsString(target);var domElement=specialHTMLTargets[target]||(typeof document!==\"undefined\"?document.querySelector(target):undefined);return domElement}function findCanvasEventTarget(target){return findEventTarget(target)}function _emscripten_set_canvas_element_size_calling_thread(target,width,height){var canvas=findCanvasEventTarget(target);if(!canvas)return-4;if(canvas.canvasSharedPtr){GROWABLE_HEAP_I32()[canvas.canvasSharedPtr>>2]=width;GROWABLE_HEAP_I32()[canvas.canvasSharedPtr+4>>2]=height}if(canvas.offscreenCanvas||!canvas.controlTransferredOffscreen){if(canvas.offscreenCanvas)canvas=canvas.offscreenCanvas;var autoResizeViewport=false;if(canvas.GLctxObject&&canvas.GLctxObject.GLctx){var prevViewport=canvas.GLctxObject.GLctx.getParameter(2978);autoResizeViewport=prevViewport[0]===0&&prevViewport[1]===0&&prevViewport[2]===canvas.width&&prevViewport[3]===canvas.height}canvas.width=width;canvas.height=height;if(autoResizeViewport){canvas.GLctxObject.GLctx.viewport(0,0,width,height)}}else if(canvas.canvasSharedPtr){var targetThread=GROWABLE_HEAP_I32()[canvas.canvasSharedPtr+8>>2];_emscripten_set_offscreencanvas_size_on_target_thread(targetThread,target,width,height);return 1}else{return-4}return 0}function _emscripten_set_canvas_element_size_main_thread(target,width,height){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(2,1,target,width,height);return _emscripten_set_canvas_element_size_calling_thread(target,width,height)}function _emscripten_set_canvas_element_size(target,width,height){var canvas=findCanvasEventTarget(target);if(canvas){return _emscripten_set_canvas_element_size_calling_thread(target,width,height)}else{return _emscripten_set_canvas_element_size_main_thread(target,width,height)}}function _emscripten_set_current_thread_status(newStatus){}function _emscripten_set_thread_name(threadId,name){}function __webgl_enable_ANGLE_instanced_arrays(ctx){var 
ext=ctx.getExtension(\"ANGLE_instanced_arrays\");if(ext){ctx[\"vertexAttribDivisor\"]=function(index,divisor){ext[\"vertexAttribDivisorANGLE\"](index,divisor)};ctx[\"drawArraysInstanced\"]=function(mode,first,count,primcount){ext[\"drawArraysInstancedANGLE\"](mode,first,count,primcount)};ctx[\"drawElementsInstanced\"]=function(mode,count,type,indices,primcount){ext[\"drawElementsInstancedANGLE\"](mode,count,type,indices,primcount)};return 1}}function __webgl_enable_OES_vertex_array_object(ctx){var ext=ctx.getExtension(\"OES_vertex_array_object\");if(ext){ctx[\"createVertexArray\"]=function(){return ext[\"createVertexArrayOES\"]()};ctx[\"deleteVertexArray\"]=function(vao){ext[\"deleteVertexArrayOES\"](vao)};ctx[\"bindVertexArray\"]=function(vao){ext[\"bindVertexArrayOES\"](vao)};ctx[\"isVertexArray\"]=function(vao){return ext[\"isVertexArrayOES\"](vao)};return 1}}function __webgl_enable_WEBGL_draw_buffers(ctx){var ext=ctx.getExtension(\"WEBGL_draw_buffers\");if(ext){ctx[\"drawBuffers\"]=function(n,bufs){ext[\"drawBuffersWEBGL\"](n,bufs)};return 1}}function __webgl_enable_WEBGL_multi_draw(ctx){return!!(ctx.multiDrawWebgl=ctx.getExtension(\"WEBGL_multi_draw\"))}var GL={counter:1,buffers:[],programs:[],framebuffers:[],renderbuffers:[],textures:[],uniforms:[],shaders:[],vaos:[],contexts:{},offscreenCanvases:{},timerQueriesEXT:[],programInfos:{},stringCache:{},unpackAlignment:4,recordError:function recordError(errorCode){if(!GL.lastError){GL.lastError=errorCode}},getNewId:function(table){var ret=GL.counter++;for(var i=table.length;i<ret;i++){table[i]=null}return ret},getSource:function(shader,count,string,length){var source=\"\";for(var i=0;i<count;++i){var len=length?GROWABLE_HEAP_I32()[length+i*4>>2]:-1;source+=UTF8ToString(GROWABLE_HEAP_I32()[string+i*4>>2],len<0?undefined:len)}return source},createContext:function(canvas,webGLContextAttributes){var ctx=canvas.getContext(\"webgl\",webGLContextAttributes);if(!ctx)return 0;var handle=GL.registerContext(ctx,webGLContextAttributes);return handle},registerContext:function(ctx,webGLContextAttributes){var handle=_malloc(8);GROWABLE_HEAP_I32()[handle+4>>2]=_pthread_self();var context={handle:handle,attributes:webGLContextAttributes,version:webGLContextAttributes.majorVersion,GLctx:ctx};if(ctx.canvas)ctx.canvas.GLctxObject=context;GL.contexts[handle]=context;if(typeof webGLContextAttributes.enableExtensionsByDefault===\"undefined\"||webGLContextAttributes.enableExtensionsByDefault){GL.initExtensions(context)}return handle},makeContextCurrent:function(contextHandle){GL.currentContext=GL.contexts[contextHandle];Module.ctx=GLctx=GL.currentContext&&GL.currentContext.GLctx;return!(contextHandle&&!GLctx)},getContext:function(contextHandle){return GL.contexts[contextHandle]},deleteContext:function(contextHandle){if(GL.currentContext===GL.contexts[contextHandle])GL.currentContext=null;if(typeof JSEvents===\"object\")JSEvents.removeAllHandlersOnTarget(GL.contexts[contextHandle].GLctx.canvas);if(GL.contexts[contextHandle]&&GL.contexts[contextHandle].GLctx.canvas)GL.contexts[contextHandle].GLctx.canvas.GLctxObject=undefined;_free(GL.contexts[contextHandle].handle);GL.contexts[contextHandle]=null},initExtensions:function(context){if(!context)context=GL.currentContext;if(context.initExtensionsDone)return;context.initExtensionsDone=true;var GLctx=context.GLctx;__webgl_enable_ANGLE_instanced_arrays(GLctx);__webgl_enable_OES_vertex_array_object(GLctx);__webgl_enable_WEBGL_draw_buffers(GLctx);GLctx.disjointTimerQueryExt=GLctx.getExtension(\"EXT_disjoint_timer_query\");__webgl_enable_WEBGL_multi_draw(GLctx);var 
exts=GLctx.getSupportedExtensions()||[];exts.forEach(function(ext){if(ext.indexOf(\"lose_context\")<0&&ext.indexOf(\"debug\")<0){GLctx.getExtension(ext)}})},populateUniformTable:function(program){var p=GL.programs[program];var ptable=GL.programInfos[program]={uniforms:{},maxUniformLength:0,maxAttributeLength:-1,maxUniformBlockNameLength:-1};var utable=ptable.uniforms;var numUniforms=GLctx.getProgramParameter(p,35718);for(var i=0;i<numUniforms;++i){var u=GLctx.getActiveUniform(p,i);var name=u.name;ptable.maxUniformLength=Math.max(ptable.maxUniformLength,name.length+1);if(name.slice(-1)==\"]\"){name=name.slice(0,name.lastIndexOf(\"[\"))}var loc=GLctx.getUniformLocation(p,name);if(loc){var id=GL.getNewId(GL.uniforms);utable[name]=[u.size,id];GL.uniforms[id]=loc;for(var j=1;j<u.size;++j){var n=name+\"[\"+j+\"]\";loc=GLctx.getUniformLocation(p,n);id=GL.getNewId(GL.uniforms);GL.uniforms[id]=loc}}}}var __emscripten_webgl_power_preferences=[\"default\",\"low-power\",\"high-performance\"];function _emscripten_webgl_do_create_context(target,attributes){var a=attributes>>2;var powerPreference=GROWABLE_HEAP_I32()[a+(24>>2)];var contextAttributes={\"alpha\":!!GROWABLE_HEAP_I32()[a+(0>>2)],\"depth\":!!GROWABLE_HEAP_I32()[a+(4>>2)],\"stencil\":!!GROWABLE_HEAP_I32()[a+(8>>2)],\"antialias\":!!GROWABLE_HEAP_I32()[a+(12>>2)],\"premultipliedAlpha\":!!GROWABLE_HEAP_I32()[a+(16>>2)],\"preserveDrawingBuffer\":!!GROWABLE_HEAP_I32()[a+(20>>2)],\"powerPreference\":__emscripten_webgl_power_preferences[powerPreference],\"failIfMajorPerformanceCaveat\":!!GROWABLE_HEAP_I32()[a+(28>>2)],majorVersion:GROWABLE_HEAP_I32()[a+(32>>2)],minorVersion:GROWABLE_HEAP_I32()[a+(36>>2)],enableExtensionsByDefault:GROWABLE_HEAP_I32()[a+(40>>2)],explicitSwapControl:GROWABLE_HEAP_I32()[a+(44>>2)],proxyContextToMainThread:GROWABLE_HEAP_I32()[a+(48>>2)],renderViaOffscreenBackBuffer:GROWABLE_HEAP_I32()[a+(52>>2)]};var canvas=findCanvasEventTarget(target);if(!canvas){return 0}if(contextAttributes.explicitSwapControl){return 0}var contextHandle=GL.createContext(canvas,contextAttributes);return contextHandle}function _emscripten_webgl_create_context(a0,a1){return _emscripten_webgl_do_create_context(a0,a1)}var SYSCALLS={mappings:{},buffers:[null,[],[]],printChar:function(stream,curr){var buffer=SYSCALLS.buffers[stream];if(curr===0||curr===10){(stream===1?out:err)(UTF8ArrayToString(buffer,0));buffer.length=0}else{buffer.push(curr)}},varargs:undefined,get:function(){SYSCALLS.varargs+=4;var ret=GROWABLE_HEAP_I32()[SYSCALLS.varargs-4>>2];return ret},getStr:function(ptr){var ret=UTF8ToString(ptr);return ret},get64:function(low,high){return low}};function _fd_close(fd){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(3,1,fd);return 0}function _fd_seek(fd,offset_low,offset_high,whence,newOffset){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(4,1,fd,offset_low,offset_high,whence,newOffset)}function _fd_write(fd,iov,iovcnt,pnum){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(5,1,fd,iov,iovcnt,pnum);var num=0;for(var i=0;i<iovcnt;i++){var ptr=GROWABLE_HEAP_I32()[iov+i*8>>2];var len=GROWABLE_HEAP_I32()[iov+(i*8+4)>>2];for(var j=0;j<len;j++){SYSCALLS.printChar(fd,GROWABLE_HEAP_U8()[ptr+j])}num+=len}GROWABLE_HEAP_I32()[pnum>>2]=num;return 0}function _pthread_cleanup_pop(execute){var routine=PThread.threadExitHandlers.pop();if(execute)routine()}function _pthread_cleanup_push(routine,arg){PThread.threadExitHandlers.push(function(){wasmTable.get(routine)(arg)})}function spawnThread(threadParams){if(ENVIRONMENT_IS_PTHREAD)throw\"Internal Error! 
spawnThread() can only ever be called from main application thread!\";var worker=PThread.getNewWorker();if(worker.pthread!==undefined)throw\"Internal error!\";if(!threadParams.pthread_ptr)throw\"Internal error, no pthread ptr!\";PThread.runningWorkers.push(worker);var tlsMemory=_malloc(128*4);for(var i=0;i<128;++i){GROWABLE_HEAP_I32()[tlsMemory+i*4>>2]=0}var stackHigh=threadParams.stackBase+threadParams.stackSize;var pthread=PThread.pthreads[threadParams.pthread_ptr]={worker:worker,stackBase:threadParams.stackBase,stackSize:threadParams.stackSize,allocatedOwnStack:threadParams.allocatedOwnStack,threadInfoStruct:threadParams.pthread_ptr};var tis=pthread.threadInfoStruct>>2;Atomics.store(GROWABLE_HEAP_U32(),tis+(64>>2),threadParams.detached);Atomics.store(GROWABLE_HEAP_U32(),tis+(100>>2),tlsMemory);Atomics.store(GROWABLE_HEAP_U32(),tis+(40>>2),pthread.threadInfoStruct);Atomics.store(GROWABLE_HEAP_U32(),tis+(80>>2),threadParams.stackSize);Atomics.store(GROWABLE_HEAP_U32(),tis+(76>>2),stackHigh);Atomics.store(GROWABLE_HEAP_U32(),tis+(104>>2),threadParams.stackSize);Atomics.store(GROWABLE_HEAP_U32(),tis+(104+8>>2),stackHigh);Atomics.store(GROWABLE_HEAP_U32(),tis+(104+12>>2),threadParams.detached);var global_libc=_emscripten_get_global_libc();var global_locale=global_libc+40;Atomics.store(GROWABLE_HEAP_U32(),tis+(172>>2),global_locale);worker.pthread=pthread;var msg={\"cmd\":\"run\",\"start_routine\":threadParams.startRoutine,\"arg\":threadParams.arg,\"threadInfoStruct\":threadParams.pthread_ptr,\"stackBase\":threadParams.stackBase,\"stackSize\":threadParams.stackSize};worker.runPthread=function(){msg.time=performance.now();worker.postMessage(msg,threadParams.transferList)};if(worker.loaded){worker.runPthread();delete worker.runPthread}}function _pthread_create(pthread_ptr,attr,start_routine,arg){if(typeof SharedArrayBuffer===\"undefined\"){err(\"Current environment does not support SharedArrayBuffer, pthreads are not available!\");return 6}if(!pthread_ptr){err(\"pthread_create called with a null thread pointer!\");return 28}var transferList=[];var error=0;if(ENVIRONMENT_IS_PTHREAD&&(transferList.length===0||error)){return _emscripten_sync_run_in_main_thread_4(687865856,pthread_ptr,attr,start_routine,arg)}if(error)return error;var stackSize=0;var stackBase=0;var detached=0;if(attr&&attr!=-1){stackSize=GROWABLE_HEAP_I32()[attr>>2];stackSize+=81920;stackBase=GROWABLE_HEAP_I32()[attr+8>>2];detached=GROWABLE_HEAP_I32()[attr+12>>2]!==0}else{stackSize=2097152}var allocatedOwnStack=stackBase==0;if(allocatedOwnStack){stackBase=_memalign(16,stackSize)}else{stackBase-=stackSize;assert(stackBase>0)}var threadInfoStruct=_malloc(228);for(var i=0;i<228>>2;++i)GROWABLE_HEAP_U32()[(threadInfoStruct>>2)+i]=0;GROWABLE_HEAP_I32()[pthread_ptr>>2]=threadInfoStruct;GROWABLE_HEAP_I32()[threadInfoStruct+12>>2]=threadInfoStruct;var headPtr=threadInfoStruct+152;GROWABLE_HEAP_I32()[headPtr>>2]=headPtr;var threadParams={stackBase:stackBase,stackSize:stackSize,allocatedOwnStack:allocatedOwnStack,detached:detached,startRoutine:start_routine,pthread_ptr:threadInfoStruct,arg:arg,transferList:transferList};if(ENVIRONMENT_IS_PTHREAD){threadParams.cmd=\"spawnThread\";postMessage(threadParams,transferList)}else{spawnThread(threadParams)}return 0}function _sysconf(name){if(ENVIRONMENT_IS_PTHREAD)return _emscripten_proxy_to_main_thread_js(6,1,name);switch(name){case 30:return 16384;case 85:var maxHeapSize=2147483648;return maxHeapSize/16384;case 132:case 133:case 12:case 137:case 138:case 15:case 235:case 16:case 17:case 18:case 
19:case 20:case 149:case 13:case 10:case 236:case 153:case 9:case 21:case 22:case 159:case 154:case 14:case 77:case 78:case 139:case 82:case 68:case 67:case 164:case 11:case 29:case 47:case 48:case 95:case 52:case 51:case 46:return 200809;case 27:case 246:case 127:case 128:case 23:case 24:case 160:case 161:case 181:case 182:case 242:case 183:case 184:case 243:case 244:case 245:case 165:case 178:case 179:case 49:case 50:case 168:case 169:case 175:case 170:case 171:case 172:case 97:case 76:case 32:case 173:case 35:case 80:case 81:case 79:return-1;case 176:case 177:case 7:case 155:case 8:case 157:case 125:case 126:case 92:case 93:case 129:case 130:case 131:case 94:case 91:return 1;case 74:case 60:case 69:case 70:case 4:return 1024;case 31:case 42:case 72:return 32;case 87:case 26:case 33:return 2147483647;case 34:case 1:return 47839;case 38:case 36:return 99;case 43:case 37:return 2048;case 0:return 2097152;case 3:return 65536;case 28:return 32768;case 44:return 32767;case 75:return 16384;case 39:return 1e3;case 89:return 700;case 71:return 256;case 40:return 255;case 2:return 100;case 180:return 64;case 25:return 20;case 5:return 16;case 6:return 6;case 73:return 4;case 84:{if(typeof navigator===\"object\")return navigator[\"hardwareConcurrency\"]||1;return 1}}setErrNo(28);return-1}if(!ENVIRONMENT_IS_PTHREAD)PThread.initMainThreadBlock();var GLctx;var proxiedFunctionTable=[null,_atexit,_emscripten_set_canvas_element_size_main_thread,_fd_close,_fd_seek,_fd_write,_sysconf];var asmLibraryArg={\"e\":___assert_fail,\"r\":___call_main,\"x\":__emscripten_notify_thread_queue,\"b\":_abort,\"y\":_emscripten_asm_const_int,\"j\":_emscripten_conditional_set_current_thread_status,\"c\":_emscripten_futex_wait,\"d\":_emscripten_futex_wake,\"f\":_emscripten_get_now,\"p\":_emscripten_memcpy_big,\"z\":_emscripten_num_logical_cores,\"u\":_emscripten_receive_on_main_thread_js,\"q\":_emscripten_resize_heap,\"v\":_emscripten_set_canvas_element_size,\"i\":_emscripten_set_current_thread_status,\"t\":_emscripten_set_thread_name,\"w\":_emscripten_webgl_create_context,\"m\":_fd_close,\"n\":_fd_seek,\"g\":_fd_write,\"o\":initPthreadsJS,\"a\":wasmMemory||Module[\"wasmMemory\"],\"k\":_pthread_cleanup_pop,\"l\":_pthread_cleanup_push,\"h\":_pthread_create,\"s\":_sysconf};var asm=createWasm();var ___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=function(){return(___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=Module[\"asm\"][\"A\"]).apply(null,arguments)};var _init=Module[\"_init\"]=function(){return(_init=Module[\"_init\"]=Module[\"asm\"][\"B\"]).apply(null,arguments)};var _register_tensor=Module[\"_register_tensor\"]=function(){return(_register_tensor=Module[\"_register_tensor\"]=Module[\"asm\"][\"C\"]).apply(null,arguments)};var _dispose_data=Module[\"_dispose_data\"]=function(){return(_dispose_data=Module[\"_dispose_data\"]=Module[\"asm\"][\"D\"]).apply(null,arguments)};var _dispose=Module[\"_dispose\"]=function(){return(_dispose=Module[\"_dispose\"]=Module[\"asm\"][\"E\"]).apply(null,arguments)};var _Abs=Module[\"_Abs\"]=function(){return(_Abs=Module[\"_Abs\"]=Module[\"asm\"][\"G\"]).apply(null,arguments)};var _Add=Module[\"_Add\"]=function(){return(_Add=Module[\"_Add\"]=Module[\"asm\"][\"H\"]).apply(null,arguments)};var _AddN=Module[\"_AddN\"]=function(){return(_AddN=Module[\"_AddN\"]=Module[\"asm\"][\"I\"]).apply(null,arguments)};var _All=Module[\"_All\"]=function(){return(_All=Module[\"_All\"]=Module[\"asm\"][\"J\"]).apply(null,arguments)};var 
_Any=Module[\"_Any\"]=function(){return(_Any=Module[\"_Any\"]=Module[\"asm\"][\"K\"]).apply(null,arguments)};var _ArgMax=Module[\"_ArgMax\"]=function(){return(_ArgMax=Module[\"_ArgMax\"]=Module[\"asm\"][\"L\"]).apply(null,arguments)};var _AvgPool=Module[\"_AvgPool\"]=function(){return(_AvgPool=Module[\"_AvgPool\"]=Module[\"asm\"][\"M\"]).apply(null,arguments)};var _BatchMatMul=Module[\"_BatchMatMul\"]=function(){return(_BatchMatMul=Module[\"_BatchMatMul\"]=Module[\"asm\"][\"N\"]).apply(null,arguments)};var _Ceil=Module[\"_Ceil\"]=function(){return(_Ceil=Module[\"_Ceil\"]=Module[\"asm\"][\"O\"]).apply(null,arguments)};var _ClipByValue=Module[\"_ClipByValue\"]=function(){return(_ClipByValue=Module[\"_ClipByValue\"]=Module[\"asm\"][\"P\"]).apply(null,arguments)};var _Conv2D=Module[\"_Conv2D\"]=function(){return(_Conv2D=Module[\"_Conv2D\"]=Module[\"asm\"][\"Q\"]).apply(null,arguments)};var _Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=function(){return(_Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=Module[\"asm\"][\"R\"]).apply(null,arguments)};var _Cos=Module[\"_Cos\"]=function(){return(_Cos=Module[\"_Cos\"]=Module[\"asm\"][\"S\"]).apply(null,arguments)};var _CropAndResize=Module[\"_CropAndResize\"]=function(){return(_CropAndResize=Module[\"_CropAndResize\"]=Module[\"asm\"][\"T\"]).apply(null,arguments)};var _Cumsum=Module[\"_Cumsum\"]=function(){return(_Cumsum=Module[\"_Cumsum\"]=Module[\"asm\"][\"U\"]).apply(null,arguments)};var _DepthToSpace=Module[\"_DepthToSpace\"]=function(){return(_DepthToSpace=Module[\"_DepthToSpace\"]=Module[\"asm\"][\"V\"]).apply(null,arguments)};var _DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=function(){return(_DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=Module[\"asm\"][\"W\"]).apply(null,arguments)};var _Equal=Module[\"_Equal\"]=function(){return(_Equal=Module[\"_Equal\"]=Module[\"asm\"][\"X\"]).apply(null,arguments)};var _Exp=Module[\"_Exp\"]=function(){return(_Exp=Module[\"_Exp\"]=Module[\"asm\"][\"Y\"]).apply(null,arguments)};var _FlipLeftRight=Module[\"_FlipLeftRight\"]=function(){return(_FlipLeftRight=Module[\"_FlipLeftRight\"]=Module[\"asm\"][\"Z\"]).apply(null,arguments)};var _Floor=Module[\"_Floor\"]=function(){return(_Floor=Module[\"_Floor\"]=Module[\"asm\"][\"_\"]).apply(null,arguments)};var _FloorDiv=Module[\"_FloorDiv\"]=function(){return(_FloorDiv=Module[\"_FloorDiv\"]=Module[\"asm\"][\"$\"]).apply(null,arguments)};var _FusedBatchNorm=Module[\"_FusedBatchNorm\"]=function(){return(_FusedBatchNorm=Module[\"_FusedBatchNorm\"]=Module[\"asm\"][\"aa\"]).apply(null,arguments)};var _FusedConv2D=Module[\"_FusedConv2D\"]=function(){return(_FusedConv2D=Module[\"_FusedConv2D\"]=Module[\"asm\"][\"ba\"]).apply(null,arguments)};var _FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=function(){return(_FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=Module[\"asm\"][\"ca\"]).apply(null,arguments)};var _Gather=Module[\"_Gather\"]=function(){return(_Gather=Module[\"_Gather\"]=Module[\"asm\"][\"da\"]).apply(null,arguments)};var _GatherNd=Module[\"_GatherNd\"]=function(){return(_GatherNd=Module[\"_GatherNd\"]=Module[\"asm\"][\"ea\"]).apply(null,arguments)};var _Greater=Module[\"_Greater\"]=function(){return(_Greater=Module[\"_Greater\"]=Module[\"asm\"][\"fa\"]).apply(null,arguments)};var _GreaterEqual=Module[\"_GreaterEqual\"]=function(){return(_GreaterEqual=Module[\"_GreaterEqual\"]=Module[\"asm\"][\"ga\"]).apply(null,arguments)};var 
_LeakyRelu=Module[\"_LeakyRelu\"]=function(){return(_LeakyRelu=Module[\"_LeakyRelu\"]=Module[\"asm\"][\"ha\"]).apply(null,arguments)};var _Less=Module[\"_Less\"]=function(){return(_Less=Module[\"_Less\"]=Module[\"asm\"][\"ia\"]).apply(null,arguments)};var _LessEqual=Module[\"_LessEqual\"]=function(){return(_LessEqual=Module[\"_LessEqual\"]=Module[\"asm\"][\"ja\"]).apply(null,arguments)};var _Log=Module[\"_Log\"]=function(){return(_Log=Module[\"_Log\"]=Module[\"asm\"][\"ka\"]).apply(null,arguments)};var _LogicalAnd=Module[\"_LogicalAnd\"]=function(){return(_LogicalAnd=Module[\"_LogicalAnd\"]=Module[\"asm\"][\"la\"]).apply(null,arguments)};var _Max=Module[\"_Max\"]=function(){return(_Max=Module[\"_Max\"]=Module[\"asm\"][\"ma\"]).apply(null,arguments)};var _MaxPool=Module[\"_MaxPool\"]=function(){return(_MaxPool=Module[\"_MaxPool\"]=Module[\"asm\"][\"na\"]).apply(null,arguments)};var _Maximum=Module[\"_Maximum\"]=function(){return(_Maximum=Module[\"_Maximum\"]=Module[\"asm\"][\"oa\"]).apply(null,arguments)};var _Mean=Module[\"_Mean\"]=function(){return(_Mean=Module[\"_Mean\"]=Module[\"asm\"][\"pa\"]).apply(null,arguments)};var _Min=Module[\"_Min\"]=function(){return(_Min=Module[\"_Min\"]=Module[\"asm\"][\"qa\"]).apply(null,arguments)};var _Minimum=Module[\"_Minimum\"]=function(){return(_Minimum=Module[\"_Minimum\"]=Module[\"asm\"][\"ra\"]).apply(null,arguments)};var _MirrorPad=Module[\"_MirrorPad\"]=function(){return(_MirrorPad=Module[\"_MirrorPad\"]=Module[\"asm\"][\"sa\"]).apply(null,arguments)};var _Multiply=Module[\"_Multiply\"]=function(){return(_Multiply=Module[\"_Multiply\"]=Module[\"asm\"][\"ta\"]).apply(null,arguments)};var _Neg=Module[\"_Neg\"]=function(){return(_Neg=Module[\"_Neg\"]=Module[\"asm\"][\"ua\"]).apply(null,arguments)};var _NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=function(){return(_NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=Module[\"asm\"][\"va\"]).apply(null,arguments)};var _NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=function(){return(_NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=Module[\"asm\"][\"wa\"]).apply(null,arguments)};var _NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=function(){return(_NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=Module[\"asm\"][\"xa\"]).apply(null,arguments)};var _NotEqual=Module[\"_NotEqual\"]=function(){return(_NotEqual=Module[\"_NotEqual\"]=Module[\"asm\"][\"ya\"]).apply(null,arguments)};var _OneHot=Module[\"_OneHot\"]=function(){return(_OneHot=Module[\"_OneHot\"]=Module[\"asm\"][\"za\"]).apply(null,arguments)};var _PadV2=Module[\"_PadV2\"]=function(){return(_PadV2=Module[\"_PadV2\"]=Module[\"asm\"][\"Aa\"]).apply(null,arguments)};var _Pow=Module[\"_Pow\"]=function(){return(_Pow=Module[\"_Pow\"]=Module[\"asm\"][\"Ba\"]).apply(null,arguments)};var _Prelu=Module[\"_Prelu\"]=function(){return(_Prelu=Module[\"_Prelu\"]=Module[\"asm\"][\"Ca\"]).apply(null,arguments)};var _Prod=Module[\"_Prod\"]=function(){return(_Prod=Module[\"_Prod\"]=Module[\"asm\"][\"Da\"]).apply(null,arguments)};var _RealDiv=Module[\"_RealDiv\"]=function(){return(_RealDiv=Module[\"_RealDiv\"]=Module[\"asm\"][\"Ea\"]).apply(null,arguments)};var _Relu=Module[\"_Relu\"]=function(){return(_Relu=Module[\"_Relu\"]=Module[\"asm\"][\"Fa\"]).apply(null,arguments)};var _Relu6=Module[\"_Relu6\"]=function(){return(_Relu6=Module[\"_Relu6\"]=Module[\"asm\"][\"Ga\"]).apply(null,arguments)};var 
_ResizeBilinear=Module[\"_ResizeBilinear\"]=function(){return(_ResizeBilinear=Module[\"_ResizeBilinear\"]=Module[\"asm\"][\"Ha\"]).apply(null,arguments)};var _Reverse=Module[\"_Reverse\"]=function(){return(_Reverse=Module[\"_Reverse\"]=Module[\"asm\"][\"Ia\"]).apply(null,arguments)};var _RotateWithOffset=Module[\"_RotateWithOffset\"]=function(){return(_RotateWithOffset=Module[\"_RotateWithOffset\"]=Module[\"asm\"][\"Ja\"]).apply(null,arguments)};var _Round=Module[\"_Round\"]=function(){return(_Round=Module[\"_Round\"]=Module[\"asm\"][\"Ka\"]).apply(null,arguments)};var _Rsqrt=Module[\"_Rsqrt\"]=function(){return(_Rsqrt=Module[\"_Rsqrt\"]=Module[\"asm\"][\"La\"]).apply(null,arguments)};var _ScatterNd=Module[\"_ScatterNd\"]=function(){return(_ScatterNd=Module[\"_ScatterNd\"]=Module[\"asm\"][\"Ma\"]).apply(null,arguments)};var _SelectV2=Module[\"_SelectV2\"]=function(){return(_SelectV2=Module[\"_SelectV2\"]=Module[\"asm\"][\"Na\"]).apply(null,arguments)};var _Sigmoid=Module[\"_Sigmoid\"]=function(){return(_Sigmoid=Module[\"_Sigmoid\"]=Module[\"asm\"][\"Oa\"]).apply(null,arguments)};var _Sin=Module[\"_Sin\"]=function(){return(_Sin=Module[\"_Sin\"]=Module[\"asm\"][\"Pa\"]).apply(null,arguments)};var _Softmax=Module[\"_Softmax\"]=function(){return(_Softmax=Module[\"_Softmax\"]=Module[\"asm\"][\"Qa\"]).apply(null,arguments)};var _Sqrt=Module[\"_Sqrt\"]=function(){return(_Sqrt=Module[\"_Sqrt\"]=Module[\"asm\"][\"Ra\"]).apply(null,arguments)};var _Square=Module[\"_Square\"]=function(){return(_Square=Module[\"_Square\"]=Module[\"asm\"][\"Sa\"]).apply(null,arguments)};var _SquaredDifference=Module[\"_SquaredDifference\"]=function(){return(_SquaredDifference=Module[\"_SquaredDifference\"]=Module[\"asm\"][\"Ta\"]).apply(null,arguments)};var _Step=Module[\"_Step\"]=function(){return(_Step=Module[\"_Step\"]=Module[\"asm\"][\"Ua\"]).apply(null,arguments)};var _StridedSlice=Module[\"_StridedSlice\"]=function(){return(_StridedSlice=Module[\"_StridedSlice\"]=Module[\"asm\"][\"Va\"]).apply(null,arguments)};var _Sub=Module[\"_Sub\"]=function(){return(_Sub=Module[\"_Sub\"]=Module[\"asm\"][\"Wa\"]).apply(null,arguments)};var _Sum=Module[\"_Sum\"]=function(){return(_Sum=Module[\"_Sum\"]=Module[\"asm\"][\"Xa\"]).apply(null,arguments)};var _Tan=Module[\"_Tan\"]=function(){return(_Tan=Module[\"_Tan\"]=Module[\"asm\"][\"Ya\"]).apply(null,arguments)};var _Tanh=Module[\"_Tanh\"]=function(){return(_Tanh=Module[\"_Tanh\"]=Module[\"asm\"][\"Za\"]).apply(null,arguments)};var _Tile=Module[\"_Tile\"]=function(){return(_Tile=Module[\"_Tile\"]=Module[\"asm\"][\"_a\"]).apply(null,arguments)};var _TopK=Module[\"_TopK\"]=function(){return(_TopK=Module[\"_TopK\"]=Module[\"asm\"][\"$a\"]).apply(null,arguments)};var _Transform=Module[\"_Transform\"]=function(){return(_Transform=Module[\"_Transform\"]=Module[\"asm\"][\"ab\"]).apply(null,arguments)};var _Transpose=Module[\"_Transpose\"]=function(){return(_Transpose=Module[\"_Transpose\"]=Module[\"asm\"][\"bb\"]).apply(null,arguments)};var __FusedMatMul=Module[\"__FusedMatMul\"]=function(){return(__FusedMatMul=Module[\"__FusedMatMul\"]=Module[\"asm\"][\"cb\"]).apply(null,arguments)};var _malloc=Module[\"_malloc\"]=function(){return(_malloc=Module[\"_malloc\"]=Module[\"asm\"][\"db\"]).apply(null,arguments)};var _free=Module[\"_free\"]=function(){return(_free=Module[\"_free\"]=Module[\"asm\"][\"eb\"]).apply(null,arguments)};var 
___errno_location=Module[\"___errno_location\"]=function(){return(___errno_location=Module[\"___errno_location\"]=Module[\"asm\"][\"fb\"]).apply(null,arguments)};var _emscripten_get_global_libc=Module[\"_emscripten_get_global_libc\"]=function(){return(_emscripten_get_global_libc=Module[\"_emscripten_get_global_libc\"]=Module[\"asm\"][\"gb\"]).apply(null,arguments)};var _pthread_self=Module[\"_pthread_self\"]=function(){return(_pthread_self=Module[\"_pthread_self\"]=Module[\"asm\"][\"hb\"]).apply(null,arguments)};var ___pthread_tsd_run_dtors=Module[\"___pthread_tsd_run_dtors\"]=function(){return(___pthread_tsd_run_dtors=Module[\"___pthread_tsd_run_dtors\"]=Module[\"asm\"][\"ib\"]).apply(null,arguments)};var _emscripten_main_thread_process_queued_calls=Module[\"_emscripten_main_thread_process_queued_calls\"]=function(){return(_emscripten_main_thread_process_queued_calls=Module[\"_emscripten_main_thread_process_queued_calls\"]=Module[\"asm\"][\"jb\"]).apply(null,arguments)};var _emscripten_current_thread_process_queued_calls=Module[\"_emscripten_current_thread_process_queued_calls\"]=function(){return(_emscripten_current_thread_process_queued_calls=Module[\"_emscripten_current_thread_process_queued_calls\"]=Module[\"asm\"][\"kb\"]).apply(null,arguments)};var _emscripten_register_main_browser_thread_id=Module[\"_emscripten_register_main_browser_thread_id\"]=function(){return(_emscripten_register_main_browser_thread_id=Module[\"_emscripten_register_main_browser_thread_id\"]=Module[\"asm\"][\"lb\"]).apply(null,arguments)};var __emscripten_do_dispatch_to_thread=Module[\"__emscripten_do_dispatch_to_thread\"]=function(){return(__emscripten_do_dispatch_to_thread=Module[\"__emscripten_do_dispatch_to_thread\"]=Module[\"asm\"][\"mb\"]).apply(null,arguments)};var _emscripten_sync_run_in_main_thread_4=Module[\"_emscripten_sync_run_in_main_thread_4\"]=function(){return(_emscripten_sync_run_in_main_thread_4=Module[\"_emscripten_sync_run_in_main_thread_4\"]=Module[\"asm\"][\"nb\"]).apply(null,arguments)};var _emscripten_run_in_main_runtime_thread_js=Module[\"_emscripten_run_in_main_runtime_thread_js\"]=function(){return(_emscripten_run_in_main_runtime_thread_js=Module[\"_emscripten_run_in_main_runtime_thread_js\"]=Module[\"asm\"][\"ob\"]).apply(null,arguments)};var __emscripten_call_on_thread=Module[\"__emscripten_call_on_thread\"]=function(){return(__emscripten_call_on_thread=Module[\"__emscripten_call_on_thread\"]=Module[\"asm\"][\"pb\"]).apply(null,arguments)};var _emscripten_tls_init=Module[\"_emscripten_tls_init\"]=function(){return(_emscripten_tls_init=Module[\"_emscripten_tls_init\"]=Module[\"asm\"][\"qb\"]).apply(null,arguments)};var __emscripten_thread_init=Module[\"__emscripten_thread_init\"]=function(){return(__emscripten_thread_init=Module[\"__emscripten_thread_init\"]=Module[\"asm\"][\"rb\"]).apply(null,arguments)};var stackSave=Module[\"stackSave\"]=function(){return(stackSave=Module[\"stackSave\"]=Module[\"asm\"][\"sb\"]).apply(null,arguments)};var stackRestore=Module[\"stackRestore\"]=function(){return(stackRestore=Module[\"stackRestore\"]=Module[\"asm\"][\"tb\"]).apply(null,arguments)};var stackAlloc=Module[\"stackAlloc\"]=function(){return(stackAlloc=Module[\"stackAlloc\"]=Module[\"asm\"][\"ub\"]).apply(null,arguments)};var _emscripten_stack_set_limits=Module[\"_emscripten_stack_set_limits\"]=function(){return(_emscripten_stack_set_limits=Module[\"_emscripten_stack_set_limits\"]=Module[\"asm\"][\"vb\"]).apply(null,arguments)};var 
_memalign=Module[\"_memalign\"]=function(){return(_memalign=Module[\"_memalign\"]=Module[\"asm\"][\"wb\"]).apply(null,arguments)};var __emscripten_allow_main_runtime_queued_calls=Module[\"__emscripten_allow_main_runtime_queued_calls\"]=9808;var __emscripten_main_thread_futex=Module[\"__emscripten_main_thread_futex\"]=11432;Module[\"cwrap\"]=cwrap;Module[\"PThread\"]=PThread;Module[\"PThread\"]=PThread;Module[\"wasmMemory\"]=wasmMemory;Module[\"ExitStatus\"]=ExitStatus;var calledRun;function ExitStatus(status){this.name=\"ExitStatus\";this.message=\"Program terminated with exit(\"+status+\")\";this.status=status}dependenciesFulfilled=function runCaller(){if(!calledRun)run();if(!calledRun)dependenciesFulfilled=runCaller};function run(args){args=args||arguments_;if(runDependencies>0){return}if(ENVIRONMENT_IS_PTHREAD){readyPromiseResolve(Module);initRuntime();postMessage({\"cmd\":\"loaded\"});return}preRun();if(runDependencies>0){return}function doRun(){if(calledRun)return;calledRun=true;Module[\"calledRun\"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module[\"onRuntimeInitialized\"])Module[\"onRuntimeInitialized\"]();postRun()}if(Module[\"setStatus\"]){Module[\"setStatus\"](\"Running...\");setTimeout(function(){setTimeout(function(){Module[\"setStatus\"](\"\")},1);doRun()},1)}else{doRun()}}Module[\"run\"]=run;function exit(status,implicit){if(implicit&&noExitRuntime&&status===0){return}if(!implicit){if(ENVIRONMENT_IS_PTHREAD){postMessage({\"cmd\":\"exitProcess\",\"returnCode\":status});throw new ExitStatus(status)}else{}}if(noExitRuntime){}else{PThread.terminateAllThreads();EXITSTATUS=status;exitRuntime();if(Module[\"onExit\"])Module[\"onExit\"](status);ABORT=true}quit_(status,new ExitStatus(status))}if(Module[\"preInit\"]){if(typeof Module[\"preInit\"]==\"function\")Module[\"preInit\"]=[Module[\"preInit\"]];while(Module[\"preInit\"].length>0){Module[\"preInit\"].pop()()}}if(ENVIRONMENT_IS_PTHREAD){noExitRuntime=false;PThread.initWorker()}run();\n\n\n return WasmBackendModuleThreadedSimd.ready\n}\n);\n})();\nif (typeof exports === 'object' && typeof module === 'object')\n module.exports = WasmBackendModuleThreadedSimd;\nelse if (typeof define === 'function' && define['amd'])\n define([], function() { return WasmBackendModuleThreadedSimd; });\nelse if (typeof exports === 'object')\n exports[\"WasmBackendModuleThreadedSimd\"] = WasmBackendModuleThreadedSimd;\n", "\nvar WasmBackendModule = (function() {\n var _scriptDir = typeof document !== 'undefined' && document.currentScript ? 
document.currentScript.src : undefined;\n if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;\n return (\nfunction(WasmBackendModule) {\n WasmBackendModule = WasmBackendModule || {};\n\nvar Module=typeof WasmBackendModule!==\"undefined\"?WasmBackendModule:{};var readyPromiseResolve,readyPromiseReject;Module[\"ready\"]=new Promise(function(resolve,reject){readyPromiseResolve=resolve;readyPromiseReject=reject});var moduleOverrides={};var key;for(key in Module){if(Module.hasOwnProperty(key)){moduleOverrides[key]=Module[key]}}var arguments_=[];var thisProgram=\"./this.program\";var quit_=function(status,toThrow){throw toThrow};var ENVIRONMENT_IS_WEB=false;var ENVIRONMENT_IS_WORKER=false;var ENVIRONMENT_IS_NODE=false;var ENVIRONMENT_IS_SHELL=false;ENVIRONMENT_IS_WEB=typeof window===\"object\";ENVIRONMENT_IS_WORKER=typeof importScripts===\"function\";ENVIRONMENT_IS_NODE=typeof process===\"object\"&&typeof process.versions===\"object\"&&typeof process.versions.node===\"string\";ENVIRONMENT_IS_SHELL=!ENVIRONMENT_IS_WEB&&!ENVIRONMENT_IS_NODE&&!ENVIRONMENT_IS_WORKER;var scriptDirectory=\"\";function locateFile(path){if(Module[\"locateFile\"]){return Module[\"locateFile\"](path,scriptDirectory)}return scriptDirectory+path}var read_,readAsync,readBinary,setWindowTitle;var nodeFS;var nodePath;if(ENVIRONMENT_IS_NODE){if(ENVIRONMENT_IS_WORKER){scriptDirectory=require(\"path\").dirname(scriptDirectory)+\"/\"}else{scriptDirectory=__dirname+\"/\"}read_=function shell_read(filename,binary){if(!nodeFS)nodeFS=require(\"fs\");if(!nodePath)nodePath=require(\"path\");filename=nodePath[\"normalize\"](filename);return nodeFS[\"readFileSync\"](filename,binary?null:\"utf8\")};readBinary=function readBinary(filename){var ret=read_(filename,true);if(!ret.buffer){ret=new Uint8Array(ret)}assert(ret.buffer);return ret};if(process[\"argv\"].length>1){thisProgram=process[\"argv\"][1].replace(/\\\\/g,\"/\")}arguments_=process[\"argv\"].slice(2);process[\"on\"](\"uncaughtException\",function(ex){if(!(ex instanceof ExitStatus)){throw ex}});process[\"on\"](\"unhandledRejection\",abort);quit_=function(status){process[\"exit\"](status)};Module[\"inspect\"]=function(){return\"[Emscripten Module object]\"}}else if(ENVIRONMENT_IS_SHELL){if(typeof read!=\"undefined\"){read_=function shell_read(f){return read(f)}}readBinary=function readBinary(f){var data;if(typeof readbuffer===\"function\"){return new Uint8Array(readbuffer(f))}data=read(f,\"binary\");assert(typeof data===\"object\");return data};if(typeof scriptArgs!=\"undefined\"){arguments_=scriptArgs}else if(typeof arguments!=\"undefined\"){arguments_=arguments}if(typeof quit===\"function\"){quit_=function(status){quit(status)}}if(typeof print!==\"undefined\"){if(typeof console===\"undefined\")console={};console.log=print;console.warn=console.error=typeof printErr!==\"undefined\"?printErr:print}}else if(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER){if(ENVIRONMENT_IS_WORKER){scriptDirectory=self.location.href}else if(typeof document!==\"undefined\"&&document.currentScript){scriptDirectory=document.currentScript.src}if(_scriptDir){scriptDirectory=_scriptDir}if(scriptDirectory.indexOf(\"blob:\")!==0){scriptDirectory=scriptDirectory.substr(0,scriptDirectory.lastIndexOf(\"/\")+1)}else{scriptDirectory=\"\"}{read_=function(url){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.send(null);return xhr.responseText};if(ENVIRONMENT_IS_WORKER){readBinary=function(url){var xhr=new 
XMLHttpRequest;xhr.open(\"GET\",url,false);xhr.responseType=\"arraybuffer\";xhr.send(null);return new Uint8Array(xhr.response)}}readAsync=function(url,onload,onerror){var xhr=new XMLHttpRequest;xhr.open(\"GET\",url,true);xhr.responseType=\"arraybuffer\";xhr.onload=function(){if(xhr.status==200||xhr.status==0&&xhr.response){onload(xhr.response);return}onerror()};xhr.onerror=onerror;xhr.send(null)}}setWindowTitle=function(title){document.title=title}}else{}var out=Module[\"print\"]||console.log.bind(console);var err=Module[\"printErr\"]||console.warn.bind(console);for(key in moduleOverrides){if(moduleOverrides.hasOwnProperty(key)){Module[key]=moduleOverrides[key]}}moduleOverrides=null;if(Module[\"arguments\"])arguments_=Module[\"arguments\"];if(Module[\"thisProgram\"])thisProgram=Module[\"thisProgram\"];if(Module[\"quit\"])quit_=Module[\"quit\"];var wasmBinary;if(Module[\"wasmBinary\"])wasmBinary=Module[\"wasmBinary\"];var noExitRuntime=Module[\"noExitRuntime\"]||true;if(typeof WebAssembly!==\"object\"){abort(\"no native wasm support detected\")}var wasmMemory;var ABORT=false;var EXITSTATUS;function assert(condition,text){if(!condition){abort(\"Assertion failed: \"+text)}}function getCFunc(ident){var func=Module[\"_\"+ident];assert(func,\"Cannot call unknown function \"+ident+\", make sure it is exported\");return func}function ccall(ident,returnType,argTypes,args,opts){var toC={\"string\":function(str){var ret=0;if(str!==null&&str!==undefined&&str!==0){var len=(str.length<<2)+1;ret=stackAlloc(len);stringToUTF8(str,ret,len)}return ret},\"array\":function(arr){var ret=stackAlloc(arr.length);writeArrayToMemory(arr,ret);return ret}};function convertReturnValue(ret){if(returnType===\"string\")return UTF8ToString(ret);if(returnType===\"boolean\")return Boolean(ret);return ret}var func=getCFunc(ident);var cArgs=[];var stack=0;if(args){for(var i=0;i<args.length;i++){var converter=toC[argTypes[i]];if(converter){if(stack===0)stack=stackSave();cArgs[i]=converter(args[i])}else{cArgs[i]=args[i]}}}var ret=func.apply(null,cArgs);ret=convertReturnValue(ret);if(stack!==0)stackRestore(stack);return ret}function cwrap(ident,returnType,argTypes,opts){argTypes=argTypes||[];var numericArgs=argTypes.every(function(type){return type===\"number\"});var numericRet=returnType!==\"string\";if(numericRet&&numericArgs&&!opts){return getCFunc(ident)}return function(){return ccall(ident,returnType,argTypes,arguments,opts)}}var UTF8Decoder=typeof TextDecoder!==\"undefined\"?new TextDecoder(\"utf8\"):undefined;function UTF8ArrayToString(heap,idx,maxBytesToRead){var endIdx=idx+maxBytesToRead;var endPtr=idx;while(heap[endPtr]&&!(endPtr>=endIdx))++endPtr;if(endPtr-idx>16&&heap.subarray&&UTF8Decoder){return UTF8Decoder.decode(heap.subarray(idx,endPtr))}else{var str=\"\";while(idx<endPtr){var u0=heap[idx++];if(!(u0&128)){str+=String.fromCharCode(u0);continue}var u1=heap[idx++]&63;if((u0&224)==192){str+=String.fromCharCode((u0&31)<<6|u1);continue}var u2=heap[idx++]&63;if((u0&240)==224){u0=(u0&15)<<12|u1<<6|u2}else{u0=(u0&7)<<18|u1<<12|u2<<6|heap[idx++]&63}if(u0<65536){str+=String.fromCharCode(u0)}else{var ch=u0-65536;str+=String.fromCharCode(55296|ch>>10,56320|ch&1023)}}}return str}function UTF8ToString(ptr,maxBytesToRead){return ptr?UTF8ArrayToString(HEAPU8,ptr,maxBytesToRead):\"\"}function stringToUTF8Array(str,heap,outIdx,maxBytesToWrite){if(!(maxBytesToWrite>0))return 0;var startIdx=outIdx;var endIdx=outIdx+maxBytesToWrite-1;for(var i=0;i<str.length;++i){var u=str.charCodeAt(i);if(u>=55296&&u<=57343){var u1=str.charCodeAt(++i);u=65536+((u&1023)<<10)|u1&1023}if(u<=127){if(outIdx>=endIdx)break;heap[outIdx++]=u}else if(u<=2047){if(outIdx+1>=endIdx)break;heap[outIdx++]=192|u>>6;heap[outIdx++]=128|u&63}else if(u<=65535){if(outIdx+2>=endIdx)break;heap[outIdx++]=224|u>>12;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}else{if(outIdx+3>=endIdx)break;heap[outIdx++]=240|u>>18;heap[outIdx++]=128|u>>12&63;heap[outIdx++]=128|u>>6&63;heap[outIdx++]=128|u&63}}heap[outIdx]=0;return outIdx-startIdx}function stringToUTF8(str,outPtr,maxBytesToWrite){return stringToUTF8Array(str,HEAPU8,outPtr,maxBytesToWrite)}function writeArrayToMemory(array,buffer){HEAP8.set(array,buffer)}function alignUp(x,multiple){if(x%multiple>0){x+=multiple-x%multiple}return x}var buffer,HEAP8,HEAPU8,HEAP16,HEAPU16,HEAP32,HEAPU32,HEAPF32,HEAPF64;function updateGlobalBufferAndViews(buf){buffer=buf;Module[\"HEAP8\"]=HEAP8=new Int8Array(buf);Module[\"HEAP16\"]=HEAP16=new Int16Array(buf);Module[\"HEAP32\"]=HEAP32=new Int32Array(buf);Module[\"HEAPU8\"]=HEAPU8=new Uint8Array(buf);Module[\"HEAPU16\"]=HEAPU16=new Uint16Array(buf);Module[\"HEAPU32\"]=HEAPU32=new Uint32Array(buf);Module[\"HEAPF32\"]=HEAPF32=new 
Float32Array(buf);Module[\"HEAPF64\"]=HEAPF64=new Float64Array(buf)}var INITIAL_MEMORY=Module[\"INITIAL_MEMORY\"]||16777216;var wasmTable;var __ATPRERUN__=[];var __ATINIT__=[];var __ATMAIN__=[];var __ATPOSTRUN__=[];var runtimeInitialized=false;__ATINIT__.push({func:function(){___wasm_call_ctors()}});function preRun(){if(Module[\"preRun\"]){if(typeof Module[\"preRun\"]==\"function\")Module[\"preRun\"]=[Module[\"preRun\"]];while(Module[\"preRun\"].length){addOnPreRun(Module[\"preRun\"].shift())}}callRuntimeCallbacks(__ATPRERUN__)}function initRuntime(){runtimeInitialized=true;callRuntimeCallbacks(__ATINIT__)}function preMain(){callRuntimeCallbacks(__ATMAIN__)}function postRun(){if(Module[\"postRun\"]){if(typeof Module[\"postRun\"]==\"function\")Module[\"postRun\"]=[Module[\"postRun\"]];while(Module[\"postRun\"].length){addOnPostRun(Module[\"postRun\"].shift())}}callRuntimeCallbacks(__ATPOSTRUN__)}function addOnPreRun(cb){__ATPRERUN__.unshift(cb)}function addOnPostRun(cb){__ATPOSTRUN__.unshift(cb)}var runDependencies=0;var runDependencyWatcher=null;var dependenciesFulfilled=null;function addRunDependency(id){runDependencies++;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}}function removeRunDependency(id){runDependencies--;if(Module[\"monitorRunDependencies\"]){Module[\"monitorRunDependencies\"](runDependencies)}if(runDependencies==0){if(runDependencyWatcher!==null){clearInterval(runDependencyWatcher);runDependencyWatcher=null}if(dependenciesFulfilled){var callback=dependenciesFulfilled;dependenciesFulfilled=null;callback()}}}Module[\"preloadedImages\"]={};Module[\"preloadedAudios\"]={};function abort(what){if(Module[\"onAbort\"]){Module[\"onAbort\"](what)}what+=\"\";err(what);ABORT=true;EXITSTATUS=1;what=\"abort(\"+what+\"). 
Build with -s ASSERTIONS=1 for more info.\";var e=new WebAssembly.RuntimeError(what);readyPromiseReject(e);throw e}function hasPrefix(str,prefix){return String.prototype.startsWith?str.startsWith(prefix):str.indexOf(prefix)===0}var dataURIPrefix=\"data:application/octet-stream;base64,\";function isDataURI(filename){return hasPrefix(filename,dataURIPrefix)}var fileURIPrefix=\"file://\";function isFileURI(filename){return hasPrefix(filename,fileURIPrefix)}var wasmBinaryFile=\"tfjs-backend-wasm.wasm\";if(!isDataURI(wasmBinaryFile)){wasmBinaryFile=locateFile(wasmBinaryFile)}function getBinary(file){try{if(file==wasmBinaryFile&&wasmBinary){return new Uint8Array(wasmBinary)}if(readBinary){return readBinary(file)}else{throw\"both async and sync fetching of the wasm failed\"}}catch(err){abort(err)}}function getBinaryPromise(){if(!wasmBinary&&(ENVIRONMENT_IS_WEB||ENVIRONMENT_IS_WORKER)){if(typeof fetch===\"function\"&&!isFileURI(wasmBinaryFile)){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){if(!response[\"ok\"]){throw\"failed to load wasm binary file at '\"+wasmBinaryFile+\"'\"}return response[\"arrayBuffer\"]()}).catch(function(){return getBinary(wasmBinaryFile)})}else{if(readAsync){return new Promise(function(resolve,reject){readAsync(wasmBinaryFile,function(response){resolve(new Uint8Array(response))},reject)})}}}return Promise.resolve().then(function(){return getBinary(wasmBinaryFile)})}function createWasm(){var info={\"a\":asmLibraryArg};function receiveInstance(instance,module){var exports=instance.exports;Module[\"asm\"]=exports;wasmMemory=Module[\"asm\"][\"i\"];updateGlobalBufferAndViews(wasmMemory.buffer);wasmTable=Module[\"asm\"][\"o\"];removeRunDependency(\"wasm-instantiate\")}addRunDependency(\"wasm-instantiate\");function receiveInstantiatedSource(output){receiveInstance(output[\"instance\"])}function instantiateArrayBuffer(receiver){return getBinaryPromise().then(function(binary){return WebAssembly.instantiate(binary,info)}).then(receiver,function(reason){err(\"failed to asynchronously prepare wasm: \"+reason);abort(reason)})}function instantiateAsync(){if(!wasmBinary&&typeof WebAssembly.instantiateStreaming===\"function\"&&!isDataURI(wasmBinaryFile)&&!isFileURI(wasmBinaryFile)&&typeof fetch===\"function\"){return fetch(wasmBinaryFile,{credentials:\"same-origin\"}).then(function(response){var result=WebAssembly.instantiateStreaming(response,info);return result.then(receiveInstantiatedSource,function(reason){err(\"wasm streaming compile failed: \"+reason);err(\"falling back to ArrayBuffer instantiation\");return instantiateArrayBuffer(receiveInstantiatedSource)})})}else{return instantiateArrayBuffer(receiveInstantiatedSource)}}if(Module[\"instantiateWasm\"]){try{var exports=Module[\"instantiateWasm\"](info,receiveInstance);return exports}catch(e){err(\"Module.instantiateWasm callback failed with error: \"+e);return false}}instantiateAsync().catch(readyPromiseReject);return{}}function callRuntimeCallbacks(callbacks){while(callbacks.length>0){var callback=callbacks.shift();if(typeof callback==\"function\"){callback(Module);continue}var func=callback.func;if(typeof func===\"number\"){if(callback.arg===undefined){wasmTable.get(func)()}else{wasmTable.get(func)(callback.arg)}}else{func(callback.arg===undefined?null:callback.arg)}}}function _abort(){abort()}function _emscripten_memcpy_big(dest,src,num){HEAPU8.copyWithin(dest,src,src+num)}function _emscripten_get_heap_size(){return HEAPU8.length}function 
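/* instantiateAsync above prefers WebAssembly.instantiateStreaming and falls back to ArrayBuffer instantiation when streaming is unavailable or fails (for example when the server does not serve the .wasm file with the application/wasm MIME type). A minimal standalone sketch of that pattern, with illustrative names not taken from this bundle:
async function loadWasm(url, imports) {
  if (typeof WebAssembly.instantiateStreaming === 'function') {
    try {
      return await WebAssembly.instantiateStreaming(fetch(url, { credentials: 'same-origin' }), imports);
    } catch (e) {
      // fall through to the ArrayBuffer path below
    }
  }
  const buf = await (await fetch(url)).arrayBuffer();
  return WebAssembly.instantiate(buf, imports); // resolves to { module, instance }
}
*/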
emscripten_realloc_buffer(size){try{wasmMemory.grow(size-buffer.byteLength+65535>>>16);updateGlobalBufferAndViews(wasmMemory.buffer);return 1}catch(e){}}function _emscripten_resize_heap(requestedSize){var oldSize=_emscripten_get_heap_size();var maxHeapSize=2147483648;if(requestedSize>maxHeapSize){return false}for(var cutDown=1;cutDown<=4;cutDown*=2){var overGrownHeapSize=oldSize*(1+.2/cutDown);overGrownHeapSize=Math.min(overGrownHeapSize,requestedSize+100663296);var newSize=Math.min(maxHeapSize,alignUp(Math.max(requestedSize,overGrownHeapSize),65536));var replacement=emscripten_realloc_buffer(newSize);if(replacement){return true}}return false}var SYSCALLS={mappings:{},buffers:[null,[],[]],printChar:function(stream,curr){var buffer=SYSCALLS.buffers[stream];if(curr===0||curr===10){(stream===1?out:err)(UTF8ArrayToString(buffer,0));buffer.length=0}else{buffer.push(curr)}},varargs:undefined,get:function(){SYSCALLS.varargs+=4;var ret=HEAP32[SYSCALLS.varargs-4>>2];return ret},getStr:function(ptr){var ret=UTF8ToString(ptr);return ret},get64:function(low,high){return low}};function _fd_close(fd){return 0}function _fd_seek(fd,offset_low,offset_high,whence,newOffset){}function _fd_write(fd,iov,iovcnt,pnum){var num=0;for(var i=0;i<iovcnt;i++){var ptr=HEAP32[iov+i*8>>2];var len=HEAP32[iov+(i*8+4)>>2];for(var j=0;j<len;j++){SYSCALLS.printChar(fd,HEAPU8[ptr+j])}num+=len}HEAP32[pnum>>2]=num;return 0}function _pthread_create(){return 6}function setErrNo(value){HEAP32[___errno_location()>>2]=value;return value}function _sysconf(name){switch(name){case 30:return 16384;case 85:var maxHeapSize=2147483648;return maxHeapSize/16384;case 132:case 133:case 12:case 137:case 138:case 15:case 235:case 16:case 17:case 18:case 19:case 20:case 149:case 13:case 10:case 236:case 153:case 9:case 21:case 22:case 159:case 154:case 14:case 77:case 78:case 139:case 82:case 68:case 67:case 164:case 11:case 29:case 47:case 48:case 95:case 52:case 51:case 46:return 200809;case 27:case 246:case 127:case 128:case 23:case 24:case 160:case 161:case 181:case 182:case 242:case 183:case 184:case 243:case 244:case 245:case 165:case 178:case 179:case 49:case 50:case 168:case 169:case 175:case 170:case 171:case 172:case 97:case 76:case 32:case 173:case 35:case 80:case 81:case 79:return-1;case 176:case 177:case 7:case 155:case 8:case 157:case 125:case 126:case 92:case 93:case 129:case 130:case 131:case 94:case 91:return 1;case 74:case 60:case 69:case 70:case 4:return 1024;case 31:case 42:case 72:return 32;case 87:case 26:case 33:return 2147483647;case 34:case 1:return 47839;case 38:case 36:return 99;case 43:case 37:return 2048;case 0:return 2097152;case 3:return 65536;case 28:return 32768;case 44:return 32767;case 75:return 16384;case 39:return 1e3;case 89:return 700;case 71:return 256;case 40:return 255;case 2:return 100;case 180:return 64;case 25:return 20;case 5:return 16;case 6:return 6;case 73:return 4;case 84:{if(typeof navigator===\"object\")return navigator[\"hardwareConcurrency\"]||1;return 1}}setErrNo(28);return-1}var asmLibraryArg={\"a\":_abort,\"d\":_emscripten_memcpy_big,\"e\":_emscripten_resize_heap,\"f\":_fd_close,\"c\":_fd_seek,\"b\":_fd_write,\"g\":_pthread_create,\"h\":_sysconf};var asm=createWasm();var ___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=function(){return(___wasm_call_ctors=Module[\"___wasm_call_ctors\"]=Module[\"asm\"][\"j\"]).apply(null,arguments)};var _init=Module[\"_init\"]=function(){return(_init=Module[\"_init\"]=Module[\"asm\"][\"k\"]).apply(null,arguments)};var 
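/* _emscripten_resize_heap above grows wasm memory geometrically: each attempt over-allocates by 20% (damped by cutDown), caps the overshoot at requestedSize plus ~96MB (100663296 bytes), clamps to the 2GB maxHeapSize, and rounds up to the 64KB wasm page size via alignUp. A sketch of just the size computation, simplified from the code above:
function nextHeapSize(oldSize, requestedSize, cutDown) {
  var maxHeapSize = 2147483648; // 2GB
  var overGrown = Math.min(oldSize * (1 + 0.2 / cutDown), requestedSize + 100663296);
  // round up to a whole number of 64KB wasm pages
  return Math.min(maxHeapSize, Math.ceil(Math.max(requestedSize, overGrown) / 65536) * 65536);
}
*/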
_register_tensor=Module[\"_register_tensor\"]=function(){return(_register_tensor=Module[\"_register_tensor\"]=Module[\"asm\"][\"l\"]).apply(null,arguments)};var _dispose_data=Module[\"_dispose_data\"]=function(){return(_dispose_data=Module[\"_dispose_data\"]=Module[\"asm\"][\"m\"]).apply(null,arguments)};var _dispose=Module[\"_dispose\"]=function(){return(_dispose=Module[\"_dispose\"]=Module[\"asm\"][\"n\"]).apply(null,arguments)};var _Abs=Module[\"_Abs\"]=function(){return(_Abs=Module[\"_Abs\"]=Module[\"asm\"][\"p\"]).apply(null,arguments)};var _Add=Module[\"_Add\"]=function(){return(_Add=Module[\"_Add\"]=Module[\"asm\"][\"q\"]).apply(null,arguments)};var _AddN=Module[\"_AddN\"]=function(){return(_AddN=Module[\"_AddN\"]=Module[\"asm\"][\"r\"]).apply(null,arguments)};var _All=Module[\"_All\"]=function(){return(_All=Module[\"_All\"]=Module[\"asm\"][\"s\"]).apply(null,arguments)};var _Any=Module[\"_Any\"]=function(){return(_Any=Module[\"_Any\"]=Module[\"asm\"][\"t\"]).apply(null,arguments)};var _ArgMax=Module[\"_ArgMax\"]=function(){return(_ArgMax=Module[\"_ArgMax\"]=Module[\"asm\"][\"u\"]).apply(null,arguments)};var _AvgPool=Module[\"_AvgPool\"]=function(){return(_AvgPool=Module[\"_AvgPool\"]=Module[\"asm\"][\"v\"]).apply(null,arguments)};var _BatchMatMul=Module[\"_BatchMatMul\"]=function(){return(_BatchMatMul=Module[\"_BatchMatMul\"]=Module[\"asm\"][\"w\"]).apply(null,arguments)};var _Ceil=Module[\"_Ceil\"]=function(){return(_Ceil=Module[\"_Ceil\"]=Module[\"asm\"][\"x\"]).apply(null,arguments)};var _ClipByValue=Module[\"_ClipByValue\"]=function(){return(_ClipByValue=Module[\"_ClipByValue\"]=Module[\"asm\"][\"y\"]).apply(null,arguments)};var _Conv2D=Module[\"_Conv2D\"]=function(){return(_Conv2D=Module[\"_Conv2D\"]=Module[\"asm\"][\"z\"]).apply(null,arguments)};var _Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=function(){return(_Conv2DBackpropInput=Module[\"_Conv2DBackpropInput\"]=Module[\"asm\"][\"A\"]).apply(null,arguments)};var _Cos=Module[\"_Cos\"]=function(){return(_Cos=Module[\"_Cos\"]=Module[\"asm\"][\"B\"]).apply(null,arguments)};var _CropAndResize=Module[\"_CropAndResize\"]=function(){return(_CropAndResize=Module[\"_CropAndResize\"]=Module[\"asm\"][\"C\"]).apply(null,arguments)};var _Cumsum=Module[\"_Cumsum\"]=function(){return(_Cumsum=Module[\"_Cumsum\"]=Module[\"asm\"][\"D\"]).apply(null,arguments)};var _DepthToSpace=Module[\"_DepthToSpace\"]=function(){return(_DepthToSpace=Module[\"_DepthToSpace\"]=Module[\"asm\"][\"E\"]).apply(null,arguments)};var _DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=function(){return(_DepthwiseConv2dNative=Module[\"_DepthwiseConv2dNative\"]=Module[\"asm\"][\"F\"]).apply(null,arguments)};var _Equal=Module[\"_Equal\"]=function(){return(_Equal=Module[\"_Equal\"]=Module[\"asm\"][\"G\"]).apply(null,arguments)};var _Exp=Module[\"_Exp\"]=function(){return(_Exp=Module[\"_Exp\"]=Module[\"asm\"][\"H\"]).apply(null,arguments)};var _FlipLeftRight=Module[\"_FlipLeftRight\"]=function(){return(_FlipLeftRight=Module[\"_FlipLeftRight\"]=Module[\"asm\"][\"I\"]).apply(null,arguments)};var _Floor=Module[\"_Floor\"]=function(){return(_Floor=Module[\"_Floor\"]=Module[\"asm\"][\"J\"]).apply(null,arguments)};var _FloorDiv=Module[\"_FloorDiv\"]=function(){return(_FloorDiv=Module[\"_FloorDiv\"]=Module[\"asm\"][\"K\"]).apply(null,arguments)};var _FusedBatchNorm=Module[\"_FusedBatchNorm\"]=function(){return(_FusedBatchNorm=Module[\"_FusedBatchNorm\"]=Module[\"asm\"][\"L\"]).apply(null,arguments)};var 
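/* Every export binding in this section uses the same lazy, self-replacing wrapper: the first call re-binds both the local variable and Module['_name'] directly to the wasm export (stored under a minified key on Module['asm']), so later calls skip the wrapper entirely. The general shape, with placeholder names ('x' stands for the minified export key):
var _op = Module['_op'] = function () {
  return (_op = Module['_op'] = Module['asm']['x']).apply(null, arguments);
};
*/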
_FusedConv2D=Module[\"_FusedConv2D\"]=function(){return(_FusedConv2D=Module[\"_FusedConv2D\"]=Module[\"asm\"][\"M\"]).apply(null,arguments)};var _FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=function(){return(_FusedDepthwiseConv2D=Module[\"_FusedDepthwiseConv2D\"]=Module[\"asm\"][\"N\"]).apply(null,arguments)};var _Gather=Module[\"_Gather\"]=function(){return(_Gather=Module[\"_Gather\"]=Module[\"asm\"][\"O\"]).apply(null,arguments)};var _GatherNd=Module[\"_GatherNd\"]=function(){return(_GatherNd=Module[\"_GatherNd\"]=Module[\"asm\"][\"P\"]).apply(null,arguments)};var _Greater=Module[\"_Greater\"]=function(){return(_Greater=Module[\"_Greater\"]=Module[\"asm\"][\"Q\"]).apply(null,arguments)};var _GreaterEqual=Module[\"_GreaterEqual\"]=function(){return(_GreaterEqual=Module[\"_GreaterEqual\"]=Module[\"asm\"][\"R\"]).apply(null,arguments)};var _LeakyRelu=Module[\"_LeakyRelu\"]=function(){return(_LeakyRelu=Module[\"_LeakyRelu\"]=Module[\"asm\"][\"S\"]).apply(null,arguments)};var _Less=Module[\"_Less\"]=function(){return(_Less=Module[\"_Less\"]=Module[\"asm\"][\"T\"]).apply(null,arguments)};var _LessEqual=Module[\"_LessEqual\"]=function(){return(_LessEqual=Module[\"_LessEqual\"]=Module[\"asm\"][\"U\"]).apply(null,arguments)};var _Log=Module[\"_Log\"]=function(){return(_Log=Module[\"_Log\"]=Module[\"asm\"][\"V\"]).apply(null,arguments)};var _LogicalAnd=Module[\"_LogicalAnd\"]=function(){return(_LogicalAnd=Module[\"_LogicalAnd\"]=Module[\"asm\"][\"W\"]).apply(null,arguments)};var _Max=Module[\"_Max\"]=function(){return(_Max=Module[\"_Max\"]=Module[\"asm\"][\"X\"]).apply(null,arguments)};var _MaxPool=Module[\"_MaxPool\"]=function(){return(_MaxPool=Module[\"_MaxPool\"]=Module[\"asm\"][\"Y\"]).apply(null,arguments)};var _Maximum=Module[\"_Maximum\"]=function(){return(_Maximum=Module[\"_Maximum\"]=Module[\"asm\"][\"Z\"]).apply(null,arguments)};var _Mean=Module[\"_Mean\"]=function(){return(_Mean=Module[\"_Mean\"]=Module[\"asm\"][\"_\"]).apply(null,arguments)};var _Min=Module[\"_Min\"]=function(){return(_Min=Module[\"_Min\"]=Module[\"asm\"][\"$\"]).apply(null,arguments)};var _Minimum=Module[\"_Minimum\"]=function(){return(_Minimum=Module[\"_Minimum\"]=Module[\"asm\"][\"aa\"]).apply(null,arguments)};var _MirrorPad=Module[\"_MirrorPad\"]=function(){return(_MirrorPad=Module[\"_MirrorPad\"]=Module[\"asm\"][\"ba\"]).apply(null,arguments)};var _Multiply=Module[\"_Multiply\"]=function(){return(_Multiply=Module[\"_Multiply\"]=Module[\"asm\"][\"ca\"]).apply(null,arguments)};var _Neg=Module[\"_Neg\"]=function(){return(_Neg=Module[\"_Neg\"]=Module[\"asm\"][\"da\"]).apply(null,arguments)};var _NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=function(){return(_NonMaxSuppressionV3=Module[\"_NonMaxSuppressionV3\"]=Module[\"asm\"][\"ea\"]).apply(null,arguments)};var _NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=function(){return(_NonMaxSuppressionV4=Module[\"_NonMaxSuppressionV4\"]=Module[\"asm\"][\"fa\"]).apply(null,arguments)};var _NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=function(){return(_NonMaxSuppressionV5=Module[\"_NonMaxSuppressionV5\"]=Module[\"asm\"][\"ga\"]).apply(null,arguments)};var _NotEqual=Module[\"_NotEqual\"]=function(){return(_NotEqual=Module[\"_NotEqual\"]=Module[\"asm\"][\"ha\"]).apply(null,arguments)};var _OneHot=Module[\"_OneHot\"]=function(){return(_OneHot=Module[\"_OneHot\"]=Module[\"asm\"][\"ia\"]).apply(null,arguments)};var _PadV2=Module[\"_PadV2\"]=function(){return(_PadV2=Module[\"_PadV2\"]=Module[\"asm\"][\"ja\"]).apply(null,arguments)};var 
_Pow=Module[\"_Pow\"]=function(){return(_Pow=Module[\"_Pow\"]=Module[\"asm\"][\"ka\"]).apply(null,arguments)};var _Prelu=Module[\"_Prelu\"]=function(){return(_Prelu=Module[\"_Prelu\"]=Module[\"asm\"][\"la\"]).apply(null,arguments)};var _Prod=Module[\"_Prod\"]=function(){return(_Prod=Module[\"_Prod\"]=Module[\"asm\"][\"ma\"]).apply(null,arguments)};var _RealDiv=Module[\"_RealDiv\"]=function(){return(_RealDiv=Module[\"_RealDiv\"]=Module[\"asm\"][\"na\"]).apply(null,arguments)};var _Relu=Module[\"_Relu\"]=function(){return(_Relu=Module[\"_Relu\"]=Module[\"asm\"][\"oa\"]).apply(null,arguments)};var _Relu6=Module[\"_Relu6\"]=function(){return(_Relu6=Module[\"_Relu6\"]=Module[\"asm\"][\"pa\"]).apply(null,arguments)};var _ResizeBilinear=Module[\"_ResizeBilinear\"]=function(){return(_ResizeBilinear=Module[\"_ResizeBilinear\"]=Module[\"asm\"][\"qa\"]).apply(null,arguments)};var _Reverse=Module[\"_Reverse\"]=function(){return(_Reverse=Module[\"_Reverse\"]=Module[\"asm\"][\"ra\"]).apply(null,arguments)};var _RotateWithOffset=Module[\"_RotateWithOffset\"]=function(){return(_RotateWithOffset=Module[\"_RotateWithOffset\"]=Module[\"asm\"][\"sa\"]).apply(null,arguments)};var _Round=Module[\"_Round\"]=function(){return(_Round=Module[\"_Round\"]=Module[\"asm\"][\"ta\"]).apply(null,arguments)};var _Rsqrt=Module[\"_Rsqrt\"]=function(){return(_Rsqrt=Module[\"_Rsqrt\"]=Module[\"asm\"][\"ua\"]).apply(null,arguments)};var _ScatterNd=Module[\"_ScatterNd\"]=function(){return(_ScatterNd=Module[\"_ScatterNd\"]=Module[\"asm\"][\"va\"]).apply(null,arguments)};var _SelectV2=Module[\"_SelectV2\"]=function(){return(_SelectV2=Module[\"_SelectV2\"]=Module[\"asm\"][\"wa\"]).apply(null,arguments)};var _Sigmoid=Module[\"_Sigmoid\"]=function(){return(_Sigmoid=Module[\"_Sigmoid\"]=Module[\"asm\"][\"xa\"]).apply(null,arguments)};var _Sin=Module[\"_Sin\"]=function(){return(_Sin=Module[\"_Sin\"]=Module[\"asm\"][\"ya\"]).apply(null,arguments)};var _Softmax=Module[\"_Softmax\"]=function(){return(_Softmax=Module[\"_Softmax\"]=Module[\"asm\"][\"za\"]).apply(null,arguments)};var _Sqrt=Module[\"_Sqrt\"]=function(){return(_Sqrt=Module[\"_Sqrt\"]=Module[\"asm\"][\"Aa\"]).apply(null,arguments)};var _Square=Module[\"_Square\"]=function(){return(_Square=Module[\"_Square\"]=Module[\"asm\"][\"Ba\"]).apply(null,arguments)};var _SquaredDifference=Module[\"_SquaredDifference\"]=function(){return(_SquaredDifference=Module[\"_SquaredDifference\"]=Module[\"asm\"][\"Ca\"]).apply(null,arguments)};var _Step=Module[\"_Step\"]=function(){return(_Step=Module[\"_Step\"]=Module[\"asm\"][\"Da\"]).apply(null,arguments)};var _StridedSlice=Module[\"_StridedSlice\"]=function(){return(_StridedSlice=Module[\"_StridedSlice\"]=Module[\"asm\"][\"Ea\"]).apply(null,arguments)};var _Sub=Module[\"_Sub\"]=function(){return(_Sub=Module[\"_Sub\"]=Module[\"asm\"][\"Fa\"]).apply(null,arguments)};var _Sum=Module[\"_Sum\"]=function(){return(_Sum=Module[\"_Sum\"]=Module[\"asm\"][\"Ga\"]).apply(null,arguments)};var _Tan=Module[\"_Tan\"]=function(){return(_Tan=Module[\"_Tan\"]=Module[\"asm\"][\"Ha\"]).apply(null,arguments)};var _Tanh=Module[\"_Tanh\"]=function(){return(_Tanh=Module[\"_Tanh\"]=Module[\"asm\"][\"Ia\"]).apply(null,arguments)};var _Tile=Module[\"_Tile\"]=function(){return(_Tile=Module[\"_Tile\"]=Module[\"asm\"][\"Ja\"]).apply(null,arguments)};var _TopK=Module[\"_TopK\"]=function(){return(_TopK=Module[\"_TopK\"]=Module[\"asm\"][\"Ka\"]).apply(null,arguments)};var 
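/* The stackSave/stackRestore/stackAlloc exports just below are what ccall (defined earlier in this file) uses to marshal 'string' and 'array' arguments: scratch space is allocated on the wasm stack, the data is copied in, the export runs, and the stack is restored. A hedged usage sketch; the argument list shown for the dispose_data export is illustrative, not taken from this bundle:
ccall('dispose_data', null, ['number'], [tensorId]); // one-off call
var disposeData = cwrap('dispose_data', null, ['number']); // reusable wrapper
disposeData(tensorId);
*/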
_Transform=Module[\"_Transform\"]=function(){return(_Transform=Module[\"_Transform\"]=Module[\"asm\"][\"La\"]).apply(null,arguments)};var _Transpose=Module[\"_Transpose\"]=function(){return(_Transpose=Module[\"_Transpose\"]=Module[\"asm\"][\"Ma\"]).apply(null,arguments)};var __FusedMatMul=Module[\"__FusedMatMul\"]=function(){return(__FusedMatMul=Module[\"__FusedMatMul\"]=Module[\"asm\"][\"Na\"]).apply(null,arguments)};var _malloc=Module[\"_malloc\"]=function(){return(_malloc=Module[\"_malloc\"]=Module[\"asm\"][\"Oa\"]).apply(null,arguments)};var _free=Module[\"_free\"]=function(){return(_free=Module[\"_free\"]=Module[\"asm\"][\"Pa\"]).apply(null,arguments)};var ___errno_location=Module[\"___errno_location\"]=function(){return(___errno_location=Module[\"___errno_location\"]=Module[\"asm\"][\"Qa\"]).apply(null,arguments)};var stackSave=Module[\"stackSave\"]=function(){return(stackSave=Module[\"stackSave\"]=Module[\"asm\"][\"Ra\"]).apply(null,arguments)};var stackRestore=Module[\"stackRestore\"]=function(){return(stackRestore=Module[\"stackRestore\"]=Module[\"asm\"][\"Sa\"]).apply(null,arguments)};var stackAlloc=Module[\"stackAlloc\"]=function(){return(stackAlloc=Module[\"stackAlloc\"]=Module[\"asm\"][\"Ta\"]).apply(null,arguments)};Module[\"cwrap\"]=cwrap;var calledRun;function ExitStatus(status){this.name=\"ExitStatus\";this.message=\"Program terminated with exit(\"+status+\")\";this.status=status}dependenciesFulfilled=function runCaller(){if(!calledRun)run();if(!calledRun)dependenciesFulfilled=runCaller};function run(args){args=args||arguments_;if(runDependencies>0){return}preRun();if(runDependencies>0){return}function doRun(){if(calledRun)return;calledRun=true;Module[\"calledRun\"]=true;if(ABORT)return;initRuntime();preMain();readyPromiseResolve(Module);if(Module[\"onRuntimeInitialized\"])Module[\"onRuntimeInitialized\"]();postRun()}if(Module[\"setStatus\"]){Module[\"setStatus\"](\"Running...\");setTimeout(function(){setTimeout(function(){Module[\"setStatus\"](\"\")},1);doRun()},1)}else{doRun()}}Module[\"run\"]=run;if(Module[\"preInit\"]){if(typeof Module[\"preInit\"]==\"function\")Module[\"preInit\"]=[Module[\"preInit\"]];while(Module[\"preInit\"].length>0){Module[\"preInit\"].pop()()}}run();\n\n\n return WasmBackendModule.ready\n}\n);\n})();\nif (typeof exports === 'object' && typeof module === 'object')\n module.exports = WasmBackendModule;\nelse if (typeof define === 'function' && define['amd'])\n define([], function() { return WasmBackendModule; });\nelse if (typeof exports === 'object')\n exports[\"WasmBackendModule\"] = WasmBackendModule;\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Backend, DataId} from '../tensor';\nimport {BackendValues, DataType} from '../types';\n\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n\n// Required information for all backends.\nexport interface BackendTimingInfo {\n kernelMs: number|{error: string};\n getExtraProfileInfo?(): string; // a field for additional timing information\n // e.g. packing / unpacking for WebGL backend\n}\n\nexport interface TensorStorage {\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n disposeData(dataId: DataId, force?: boolean): boolean;\n write(values: BackendValues, shape: number[], dtype: DataType): DataId;\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void;\n memory(): {unreliable: boolean;}; // Backend-specific information.\n /** Returns number of data ids currently in the storage. */\n numDataIds(): number;\n refCount(dataId: DataId): number;\n}\n\n/** Convenient class for storing tensor-related data. */\nexport class DataStorage {\n private data = new WeakMap();\n private dataIdsCount = 0;\n\n constructor(private backend: KernelBackend, private dataMover: DataMover) {}\n\n get(dataId: DataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n\n set(dataId: DataId, value: T): void {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n\n has(dataId: DataId): boolean {\n return this.data.has(dataId);\n }\n\n delete(dataId: DataId): boolean {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n\n numDataIds(): number {\n return this.dataIdsCount;\n }\n}\n\nexport interface DataMover {\n /**\n * To be called by backends whenever they see a dataId that they don't own.\n * Upon calling this method, the mover will fetch the tensor from another\n * backend and register it with the current active backend.\n */\n moveData(backend: KernelBackend, dataId: DataId): void;\n}\n\nexport interface BackendTimer {\n // check if backend timer is available\n timerAvailable(): boolean;\n time(f: () => void): Promise;\n}\n\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. 
New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend implements TensorStorage, Backend, BackendTimer {\n refCount(dataId: DataId): number {\n return notYetImplemented('refCount');\n }\n incRef(dataId: DataId): void {\n return notYetImplemented('incRef');\n }\n timerAvailable(): boolean {\n return true;\n }\n time(f: () => void): Promise {\n return notYetImplemented('time');\n }\n read(dataId: object): Promise {\n return notYetImplemented('read');\n }\n readSync(dataId: object): BackendValues {\n return notYetImplemented('readSync');\n }\n numDataIds(): number {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId: object, force?: boolean): boolean {\n return notYetImplemented('disposeData');\n }\n write(values: BackendValues, shape: number[], dtype: DataType): DataId {\n return notYetImplemented('write');\n }\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void {\n return notYetImplemented('move');\n }\n memory(): {unreliable: boolean; reasons?: string[]} {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision(): 16|32 {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon(): number {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n dispose(): void {\n return notYetImplemented('dispose');\n }\n}\n\nfunction notYetImplemented(kernelName: string): never {\n throw new Error(\n `'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, DataTypeMap, FlatVector, NumericDataType, RecursiveArray, TensorLike, TypedArray} from './types';\n\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array: any[]|Uint32Array|Int32Array|\n Float32Array): void {\n let counter = array.length;\n let temp = 0;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n temp = array[counter];\n array[counter] = array[index];\n array[index] = temp;\n }\n}\n\n/**\n * Shuffles two arrays in-place the same way using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1,2,3,4,5];\n * const b = [11,22,33,44,55];\n * tf.util.shuffleCombo(a, b);\n * console.log(a, b);\n * ```\n *\n * @param array The first array to shuffle in-place.\n * @param array2 The second array to shuffle in-place with the same permutation\n * as the first array.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function shuffleCombo(\n // tslint:disable-next-line:no-any\n array: any[]|Uint32Array|Int32Array|Float32Array,\n // tslint:disable-next-line:no-any\n array2: any[]|Uint32Array|Int32Array|Float32Array): void {\n if (array.length !== array2.length) {\n throw new Error(\n `Array sizes must match to be shuffled together ` +\n `First array length was ${array.length}` +\n `Second array length was ${array2.length}`);\n }\n let counter = array.length;\n let temp, temp2;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element of each array with it\n temp = array[counter];\n temp2 = array2[counter];\n array[counter] = array[index];\n array2[counter] = array2[index];\n array[index] = temp;\n array2[index] = temp2;\n }\n}\n\n/** Clamps a value to a specified range. */\nexport function clamp(min: number, x: number, max: number): number {\n return Math.max(min, Math.min(x, max));\n}\n\nexport function nearestLargerEven(val: number): number {\n return val % 2 === 0 ? 
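/* shuffle() above is the classic in-place Fisher-Yates pass: walking from the end, it swaps each slot with a uniformly random index in [0, counter), producing an unbiased permutation in O(n); (Math.random() * counter) | 0 is a fast floor. shuffleCombo() applies identical swaps to two parallel arrays so their pairing survives, e.g. shuffling features and labels together:
const xs = [1, 2, 3, 4];
const ys = ['a', 'b', 'c', 'd'];
shuffleCombo(xs, ys); // xs[i] still corresponds to ys[i]
*/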
val : val + 1;\n}\n\nexport function sum(arr: number[]): number {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a: number, b: number) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a: FlatVector, b: FlatVector): number {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr: boolean, msg: () => string) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? msg : msg());\n }\n}\n\nexport function assertShapesMatch(\n shapeA: number[], shapeB: number[], errorMessagePrefix = ''): void {\n assert(\n arraysEqual(shapeA, shapeB),\n () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\n\nexport function assertNonNull(a: TensorLike): void {\n assert(\n a != null,\n () => `The input to the tensor constructor must be a non-null value.`);\n}\n\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. 
Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function\nflatten|TypedArray>(\n arr: T|RecursiveArray, result: T[] = [], skipTypedArray = false): T[] {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n } else {\n result.push(arr as T);\n }\n return result;\n}\n\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape: number[]): number {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\n\nexport function isScalarShape(shape: number[]): boolean {\n return shape.length === 0;\n}\n\nexport function arraysEqual(n1: FlatVector, n2: FlatVector) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\n\nexport function isInt(a: number): boolean {\n return a % 1 === 0;\n}\n\nexport function tanh(x: number): number {\n // tslint:disable-next-line:no-any\n if ((Math as any).tanh != null) {\n // tslint:disable-next-line:no-any\n return (Math as any).tanh(x);\n }\n if (x === Infinity) {\n return 1;\n } else if (x === -Infinity) {\n return -1;\n } else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\n\nexport function sizeToSquarishShape(size: number): [number, number] {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n: number): Uint32Array {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\n\nexport function rightPad(a: string, size: number): string {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\n\nexport function repeatedTry(\n checkFn: () => boolean, delayFn = (counter: number) => 0,\n maxCounter?: number): Promise {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n\n tryCount++;\n\n const nextBackoff = delayFn(tryCount);\n\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n\n tryFn();\n });\n}\n\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. 
For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(\n shape: number[], size: number): number[] {\n let shapeProd = 1;\n let implicitIdx = -1;\n\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n } else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(\n `Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n } else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n\n if (shapeProd === 0) {\n throw Error(\n `Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(\n `The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\n\nexport function parseAxisParam(\n axis: number|number[], shape: number[]): number[] {\n const rank = shape.length;\n\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n\n // Check for valid range\n assert(\n axis.every(ax => ax >= -rank && ax < rank),\n () =>\n `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n\n // Check for only integers\n assert(\n axis.every(ax => isInt(ax)),\n () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape: number[], axis?: number[]):\n {newShape: number[], keptDims: number[]} {\n const newShape: number[] = [];\n const keptDims: number[] = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(\n `Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return {newShape, keptDims};\n}\n\nexport function getTypedArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function getArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else if (dtype === 'string') {\n values = new Array<'string'>(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function checkConversionForErrors(\n vals: DataTypeMap[D]|number[], dtype: D): void {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype: DataType): boolean {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType: DataType, newType: DataType): boolean {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\n\nexport function isTypedArray(a: {}): a is Float32Array|Int32Array|Uint8Array {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\n\nexport function bytesPerElement(dtype: DataType): number {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n } else if (dtype === 'complex64') {\n return 8;\n } else if (dtype === 'bool') {\n return 1;\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. 
Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr: Uint8Array[]): number {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n\n/** Returns true if the value is a string. */\nexport function isString(value: {}): value is string {\n return typeof value === 'string' || value instanceof String;\n}\n\nexport function isBoolean(value: {}): boolean {\n return typeof value === 'boolean';\n}\n\nexport function isNumber(value: {}): boolean {\n return typeof value === 'number';\n}\n\nexport function inferDtype(values: TensorLike): DataType {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n } else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n } else if (isNumber(values)) {\n return 'float32';\n } else if (isString(values)) {\n return 'string';\n } else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\n\nexport function isFunction(f: Function) {\n return !!(f && f.constructor && f.call && f.apply);\n}\n\nexport function nearestDivisor(size: number, start: number): number {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\n\nexport function computeStrides(shape: number[]): number[] {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\n\nfunction createNestedArray(\n offset: number, shape: number[], a: TypedArray, isComplex = false) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0] * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n } else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a, isComplex);\n }\n }\n return ret;\n}\n\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(\n shape: number[], a: TypedArray, isComplex = false) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}${\n isComplex ? 
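/* createNestedArray() above recurses on the shape: at each level the flat offset advances by the product of the remaining dimensions (doubled for complex64, whose values are interleaved real/imag pairs), carving the flat buffer into nested rows. Worked examples:
toNestedArray([2, 2], new Float32Array([1, 2, 3, 4])); // [[1, 2], [3, 4]]
toNestedArray([], new Float32Array([7])); // 7, a scalar shape yields a single number
*/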
' for a complex tensor' : ''}.`);\n }\n\n return createNestedArray(0, shape, a, isComplex);\n}\n\nexport function makeOnesTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\n\nexport function makeZerosTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size) as DataTypeMap[D];\n } else if (dtype === 'int32') {\n return new Int32Array(size) as DataTypeMap[D];\n } else if (dtype === 'bool') {\n return new Uint8Array(size) as DataTypeMap[D];\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(\n shape: number[], dtype: D) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n } else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n } else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\nexport function assertNonNegativeIntegerDimensions(shape: number[]) {\n shape.forEach(dimSize => {\n assert(\n Number.isInteger(dimSize) && dimSize >= 0,\n () =>\n `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(\n locs: number[], rank: number, strides: number[]): number {\n if (rank === 0) {\n return 0;\n } else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(\n index: number, rank: number, strides: number[]): number[] {\n if (rank === 0) {\n return [];\n } else if (rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n\n/**\n * This method asserts whether an object is a Promise instance.\n * @param object\n */\n// tslint:disable-next-line: no-any\nexport function isPromise(object: any) {\n // We chose to not use 'obj instanceOf Promise' for two reasons:\n // 1. It only reliably works for es6 Promise, not other Promise\n // implementations.\n // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch\n // the async calls, so it is possible the obj (patched) is comparing to a\n // pre-patched Promise.\n return object && object.then && typeof object.then === 'function';\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Platform} from './platforms/platform';\nimport {isPromise} from './util_base';\n\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n\ntype FlagValue = number|boolean;\ntype FlagEvaluationFn = (() => FlagValue)|(() => Promise);\nexport type Flags = {\n [featureName: string]: FlagValue\n};\nexport type FlagRegistryEntry = {\n evaluationFn: FlagEvaluationFn;\n setHook?: (value: FlagValue) => void;\n};\n\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n private flags: Flags = {};\n private flagRegistry: {[flagName: string]: FlagRegistryEntry} = {};\n\n private urlFlags: Flags = {};\n\n platformName: string;\n platform: Platform;\n\n // Jasmine spies on this in 'environment_test.ts'\n getQueryParams = getQueryParams;\n\n // tslint:disable-next-line: no-any\n constructor(public global: any) {\n this.populateURLFlags();\n }\n\n setPlatform(platformName: string, platform: Platform) {\n if (this.platform != null) {\n console.warn(\n `Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n\n registerFlag(\n flagName: string, evaluationFn: FlagEvaluationFn,\n setHook?: (value: FlagValue) => void) {\n this.flagRegistry[flagName] = {evaluationFn, setHook};\n\n // Override the flag value from the URL. This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n console.warn(\n `Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n\n async getAsync(flagName: string): Promise {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n\n get(flagName: string): FlagValue {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n const flagValue = this.evaluateFlag(flagName);\n if (isPromise(flagValue)) {\n throw new Error(\n `Flag ${flagName} cannot be synchronously evaluated. 
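/* Flag lookups are cached: get() returns this.flags[name] when present, otherwise runs the registered evaluationFn, and (as this error says) rejects async evaluators, which must go through getAsync(). URL overrides are parsed once from ?tfjsflags=FLAG1:1,FLAG2:true and applied when the matching flag is later registered. A hedged usage sketch with an invented flag name:
env().registerFlag('MY_DEBUG', () => false);
if (env().getBool('MY_DEBUG')) { console.log('debug path'); }
*/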
` +\n `Please use getAsync() instead.`);\n }\n\n this.flags[flagName] = flagValue as number | boolean;\n\n return this.flags[flagName];\n }\n\n getNumber(flagName: string): number {\n return this.get(flagName) as number;\n }\n\n getBool(flagName: string): boolean {\n return this.get(flagName) as boolean;\n }\n\n getFlags(): Flags {\n return this.flags;\n }\n // For backwards compatibility.\n get features(): Flags {\n return this.flags;\n }\n\n set(flagName: string, value: FlagValue): void {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n\n private evaluateFlag(flagName: string): FlagValue|Promise {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n\n setFlags(flags: Flags) {\n this.flags = Object.assign({}, flags);\n }\n\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n\n private populateURLFlags(): void {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n\n const urlParams = this.getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':') as [string, string];\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\n\nexport function getQueryParams(queryString: string): {[key: string]: string} {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\n\nfunction decodeParam(\n params: {[key: string]: string}, name: string, value?: string) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\n\nfunction parseValue(flagName: string, value: string): FlagValue {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n } else if (`${+ value}` === value) {\n return +value;\n }\n throw new Error(\n `Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\n\nexport let ENV: Environment = null;\nexport function setEnvironmentGlobal(environment: Environment) {\n ENV = environment;\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace: {_tfGlobals: Map};\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace(): {_tfGlobals: Map} {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns: any;\n if (typeof (window) !== 'undefined') {\n ns = window;\n } else if (typeof (global) !== 'undefined') {\n ns = global;\n } else if (typeof (process) !== 'undefined') {\n ns = process;\n } else if (typeof (self) !== 'undefined') {\n ns = self;\n } else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n\n// tslint:disable-next-line:no-any\nfunction getGlobalMap(): Map {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key: string, init: () => T): T {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n } else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Allow UpperCamelCase variable names\n// tslint:disable: variable-name\n// Unfortunately just enabling PascalCase per file (tslint:enable:\n// allow-pascal-case) doesn't work.\nimport {NamedTensorInfoMap, TensorInfo} from './kernel_registry';\nimport {ExplicitPadding} from './ops/conv_util';\nimport {Activation} from './ops/fused_types';\nimport {DataType, PixelData} from './types';\n\nexport const Abs = 'Abs';\nexport type AbsInputs = UnaryInputs;\n\nexport const Acos = 'Acos';\nexport type AcosInputs = UnaryInputs;\n\nexport const Acosh = 'Acosh';\nexport type AcoshInputs = UnaryInputs;\n\nexport const Add = 'Add';\nexport type AddInputs = BinaryInputs;\n\nexport const AddN = 'AddN';\nexport type AddNInputs = TensorInfo[];\n\nexport const All = 'All';\nexport type AllInputs = Pick;\nexport interface AllAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Any = 'Any';\nexport type AnyInputs = Pick;\nexport interface AnyAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const ArgMax = 'ArgMax';\nexport type ArgMaxInputs = Pick;\nexport interface ArgMaxAttrs {\n axis: number;\n}\n\nexport const ArgMin = 'ArgMin';\nexport type ArgMinInputs = Pick;\nexport interface ArgMinAttrs {\n axis: number;\n}\n\nexport const Asin = 'Asin';\nexport type AsinInputs = UnaryInputs;\n\nexport const Asinh = 'Asinh';\nexport type AsinhInputs = UnaryInputs;\n\nexport const Atan = 'Atan';\nexport type AtanInputs = UnaryInputs;\n\nexport const Atanh = 'Atanh';\nexport type AtanhInputs = UnaryInputs;\n\nexport const Atan2 = 'Atan2';\nexport type Atan2Inputs = BinaryInputs;\n\nexport const AvgPool = 'AvgPool';\nexport type AvgPoolInputs = Pick;\nexport interface AvgPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const AvgPoolGrad = 'AvgPoolGrad';\nexport type AvgPoolGradInputs = Pick;\nexport interface AvgPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n}\n\nexport const AvgPool3D = 'AvgPool3D';\nexport type AvgPool3DInputs = Pick;\nexport interface AvgPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n dataFormat: 'NDHWC'|'NCDHW';\n}\n\nexport const AvgPool3DGrad = 'AvgPool3DGrad';\nexport type AvgPool3DGradInputs = Pick;\nexport interface AvgPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const BatchMatMul = 'BatchMatMul';\nexport type BatchMatMulInputs = Pick;\nexport interface BatchMatMulAttrs {\n transposeA: boolean;\n transposeB: 
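/* Every kernel in this file follows the same convention: a const holding the public kernel name, an Inputs type that picks the named input tensors off NamedTensorInfoMap, and an optional Attrs interface for the non-tensor attributes. Sketched in the upstream Pick-based style; the names below are illustrative:
export const MyOp = 'MyOp';
export type MyOpInputs = Pick<NamedTensorInfoMap, 'x'>;
export interface MyOpAttrs { alpha: number; }
*/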
boolean;\n}\n\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport type BatchToSpaceNDInputs = Pick;\nexport interface BatchToSpaceNDAttrs {\n blockShape: number[];\n crops: number[][];\n}\n\nexport type BinaryInputs = Pick;\n\nexport const Bincount = 'Bincount';\nexport type BincountInputs = Pick;\nexport interface BincountAttrs {\n size: number;\n}\n\nexport const BroadcastTo = 'BroadcastTo';\nexport type BroadcastToInputs = Pick;\nexport interface BroadCastToAttrs {\n shape: number[];\n inputShape: number[]; // for gradient\n}\n\nexport const Cast = 'Cast';\nexport type CastInputs = UnaryInputs;\nexport interface CastAttrs {\n dtype: DataType;\n}\n\nexport const Ceil = 'Ceil';\nexport type CeilInputs = UnaryInputs;\n\nexport const ClipByValue = 'ClipByValue';\nexport type ClipByValueInputs = UnaryInputs;\nexport interface ClipByValueAttrs {\n clipValueMin: number;\n clipValueMax: number;\n}\n\nexport const Complex = 'Complex';\nexport type ComplexInputs = Pick;\n\nexport const ComplexAbs = 'ComplexAbs';\nexport type ComplexAbsInputs = UnaryInputs;\n\nexport const Concat = 'Concat';\nexport type ConcatInputs = TensorInfo[];\nexport interface ConcatAttrs {\n axis: number;\n}\n\nexport const Conv2D = 'Conv2D';\nexport type Conv2DInputs = Pick;\nexport interface Conv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport type Conv2DBackpropFilterInputs = Pick;\nexport interface Conv2DBackpropFilterAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport type Conv2DBackpropInputInputs = Pick;\nexport interface Conv2DBackpropInputAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Conv3D = 'Conv3D';\nexport type Conv3DInputs = Pick;\nexport interface Conv3DAttrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n dataFormat: 'NDHWC'|'NCDHW';\n dilations: [number, number, number]|number;\n}\n\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport type Conv3DBackpropFilterV2Inputs = Pick;\n\nexport interface Conv3DBackpropFilterV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n filterShape: [number, number, number, number, number];\n}\n\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport type Conv3DBackpropInputV2Inputs =\n Pick;\nexport interface Conv3DBackpropInputV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n inputShape: [number, number, number, number, number];\n}\n\nexport const Cos = 'Cos';\nexport type CosInputs = UnaryInputs;\n\nexport const Cosh = 'Cosh';\nexport type CoshInputs = UnaryInputs;\n\nexport const Cumsum = 'Cumsum';\nexport type CumsumInputs = Pick;\nexport interface CumsumAttrs {\n axis: number;\n exclusive: boolean;\n reverse: boolean;\n}\n\nexport const CropAndResize = 'CropAndResize';\nexport type CropAndResizeInputs =\n Pick;\nexport interface CropAndResizeAttrs {\n cropSize: [number, number];\n method: 'bilinear'|'nearest';\n 
extrapolationValue: number;\n}\n\nexport const DenseBincount = 'DenseBincount';\nexport type DenseBincountInputs = Pick;\nexport interface DenseBincountAttrs {\n size: number;\n binaryOutput?: boolean;\n}\n\nexport const DepthToSpace = 'DepthToSpace';\nexport type DepthToSpaceInputs = Pick;\nexport interface DepthToSpaceAttrs {\n blockSize: number;\n dataFormat: 'NHWC'|'NCHW';\n}\n\nexport const DepthwiseConv2dNative = 'DepthwiseConv2dNative';\nexport type DepthwiseConv2dNativeInputs =\n Pick;\nexport interface DepthwiseConv2dNativeAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const DepthwiseConv2dNativeBackpropFilter =\n 'DepthwiseConv2dNativeBackpropFilter';\nexport type DepthwiseConv2dNativeBackpropFilterInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropFilterAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const DepthwiseConv2dNativeBackpropInput =\n 'DepthwiseConv2dNativeBackpropInput';\nexport type DepthwiseConv2dNativeBackpropInputInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropInputAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Diag = 'Diag';\nexport type DiagInputs = Pick;\n\nexport const Dilation2D = 'Dilation2D';\nexport type Dilation2DInputs = Pick;\nexport interface Dilation2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dilations: [number, number]|number;\n}\n\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport type Dilation2DBackpropInputInputs =\n Pick;\n\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport type Dilation2DBackpropFilterInputs =\n Pick;\n\nexport const RealDiv = 'RealDiv';\nexport type RealDivInputs = BinaryInputs;\n\nexport const Einsum = 'Einsum';\nexport type EinsumInputs = TensorInfo[];\nexport interface EinsumAttrs {\n equation: string;\n}\n\nexport const Elu = 'Elu';\nexport type EluInputs = Pick;\n\nexport const EluGrad = 'EluGrad';\nexport type EluGradInputs = Pick;\n\nexport const Erf = 'Erf';\nexport type ErfInputs = UnaryInputs;\n\nexport const Equal = 'Equal';\nexport type EqualInputs = BinaryInputs;\n\nexport const Exp = 'Exp';\nexport type ExpInputs = UnaryInputs;\n\nexport const ExpandDims = 'ExpandDims';\nexport type ExpandDimsInputs = Pick;\nexport interface ExpandDimsAttrs {\n dim: number;\n}\n\nexport const Expm1 = 'Expm1';\nexport type Expm1Inputs = UnaryInputs;\n\nexport const FFT = 'FFT';\nexport type FFTInputs = Pick;\n\nexport const Fill = 'Fill';\nexport interface FillAttrs {\n shape: number[];\n value: number|string;\n dtype: DataType;\n}\n\nexport const FlipLeftRight = 'FlipLeftRight';\nexport type FlipLeftRightInputs = Pick;\n\nexport const Floor = 'Floor';\nexport type FloorInputs = UnaryInputs;\n\nexport const FloorDiv = 'FloorDiv';\nexport type FloorDivInputs = BinaryInputs;\n\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport type FusedBatchNormInputs =\n Pick;\nexport interface FusedBatchNormAttrs {\n varianceEpsilon: number;\n}\n\nexport const GatherV2 = 'GatherV2';\nexport type 
GatherV2Inputs = Pick;\nexport interface GatherV2Attrs {\n axis: number;\n batchDims: number;\n}\n\nexport const GatherNd = 'GatherNd';\nexport type GatherNdInputs = Pick;\n\nexport const Greater = 'Greater';\nexport type GreaterInputs = BinaryInputs;\n\nexport const GreaterEqual = 'GreaterEqual';\nexport type GreaterEqualInputs = BinaryInputs;\n\nexport const Identity = 'Identity';\nexport type IdentityInputs = Pick;\n\nexport const IFFT = 'IFFT';\nexport type IFFTInputs = Pick;\n\nexport const Imag = 'Imag';\nexport type ImagInputs = Pick;\n\nexport const IsFinite = 'IsFinite';\nexport type IsFiniteInputs = UnaryInputs;\n\nexport const IsInf = 'IsInf';\nexport type IsInfInputs = UnaryInputs;\n\nexport const IsNan = 'IsNan';\nexport type IsNanInputs = UnaryInputs;\n\nexport const LeakyRelu = 'LeakyRelu';\nexport type LeakyReluInputs = Pick;\nexport interface LeakyReluAttrs {\n alpha: number;\n}\n\nexport const Less = 'Less';\nexport type LessInputs = BinaryInputs;\n\nexport const LessEqual = 'LessEqual';\nexport type LessEqualInputs = BinaryInputs;\n\nexport const LinSpace = 'LinSpace';\nexport interface LinSpaceAttrs {\n start: number;\n stop: number;\n num: number;\n}\nexport const Log = 'Log';\nexport type LogInputs = UnaryInputs;\n\nexport const Log1p = 'Log1p';\nexport type Log1pInputs = UnaryInputs;\n\nexport const LogicalAnd = 'LogicalAnd';\nexport type LogicalAndInputs = BinaryInputs;\n\nexport const LogicalNot = 'LogicalNot';\nexport type LogicalNotInputs = Pick;\n\nexport const LogicalOr = 'LogicalOr';\nexport type LogicalOrInputs = BinaryInputs;\n\nexport const LogSoftmax = 'LogSoftmax';\nexport type LogSoftmaxInputs = Pick;\nexport interface LogSoftmaxAttrs {\n axis: number;\n}\n\nexport const LRN = 'LRN';\nexport type LRNInputs = Pick;\nexport interface LRNAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const LRNGrad = 'LRNGrad';\nexport type LRNGradInputs = Pick;\nexport interface LRNGradAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const Max = 'Max';\nexport type MaxInputs = Pick;\nexport interface MaxAttrs {\n reductionIndices: number|number[];\n keepDims: boolean;\n}\n\nexport const Maximum = 'Maximum';\nexport type MaximumInputs = BinaryInputs;\n\nexport const MaxPool = 'MaxPool';\nexport type MaxPoolInputs = Pick;\nexport interface MaxPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolGrad = 'MaxPoolGrad';\nexport type MaxPoolGradInputs = Pick;\nexport interface MaxPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3D = 'MaxPool3D';\nexport type MaxPool3DInputs = Pick;\nexport interface MaxPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dataFormat: 'NDHWC'|'NCDHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3DGrad = 'MaxPool3DGrad';\nexport type MaxPool3DGradInputs =\n Pick;\nexport interface MaxPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport type MaxPoolWithArgmaxInputs = Pick;\nexport interface 
MaxPoolWithArgmaxAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n includeBatchInIndex: boolean;\n}\n\nexport const Mean = 'Mean';\nexport type MeanInputs = Pick;\nexport interface MeanAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Min = 'Min';\nexport type MinInputs = Pick;\nexport interface MinAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Minimum = 'Minimum';\nexport type MinimumInputs = BinaryInputs;\n\nexport const MirrorPad = 'MirrorPad';\nexport type MirrorPadInputs = Pick;\nexport interface MirrorPadAttrs {\n paddings: Array<[number, number]>;\n mode: 'reflect'|'symmetric';\n}\n\nexport const Mod = 'Mod';\nexport type ModInputs = BinaryInputs;\n\nexport const Multinomial = 'Multinomial';\nexport type MultinomialInputs = Pick;\nexport interface MultinomialAttrs {\n numSamples: number;\n seed: number;\n normalized: boolean;\n}\n\nexport const Multiply = 'Multiply';\nexport type MultiplyInputs = BinaryInputs;\n\nexport const Neg = 'Neg';\nexport type NegInputs = UnaryInputs;\n\nexport const NotEqual = 'NotEqual';\nexport type NotEqualInputs = BinaryInputs;\n\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport type NonMaxSuppressionV3Inputs =\n Pick;\nexport interface NonMaxSuppressionV3Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n}\n\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport type NonMaxSuppressionV4Inputs =\n Pick;\nexport interface NonMaxSuppressionV4Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n padToMaxOutputSize: boolean;\n}\n\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport type NonMaxSuppressionV5Inputs =\n Pick;\nexport interface NonMaxSuppressionV5Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n softNmsSigma: number;\n}\n\nexport const OnesLike = 'OnesLike';\nexport type OnesLikeInputs = UnaryInputs;\n\nexport const OneHot = 'OneHot';\nexport type OneHotInputs = Pick;\nexport interface OneHotAttrs {\n depth: number;\n onValue: number;\n offValue: number;\n}\n\nexport const Pack = 'Pack';\nexport type PackInputs = TensorInfo[];\nexport interface PackAttrs {\n axis: number;\n}\n\nexport const PadV2 = 'PadV2';\nexport type PadV2Inputs = Pick;\nexport interface PadV2Attrs {\n paddings: Array<[number, number]>;\n constantValue: number;\n}\n\nexport const Pool = 'Pool';\nexport type PoolInputs = Pick;\n\nexport const Pow = 'Pow';\nexport type PowInputs = BinaryInputs;\n\nexport const Prelu = 'Prelu';\nexport type PreluInputs = Pick;\n\nexport const Prod = 'Prod';\nexport type ProdInputs = Pick;\nexport interface ProdAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Range = 'Range';\nexport interface RangeAttrs {\n start: number;\n stop: number;\n step: number;\n dtype: 'float32'|'int32';\n}\n\nexport const Real = 'Real';\nexport type RealInputs = Pick;\n\nexport const Reciprocal = 'Reciprocal';\nexport type ReciprocalInputs = UnaryInputs;\n\nexport const Relu = 'Relu';\nexport type ReluInputs = Pick;\n\nexport const Reshape = 'Reshape';\nexport type ReshapeInputs = Pick;\nexport interface ReshapeAttrs {\n shape: number[];\n}\n\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport type ResizeNearestNeighborInputs = Pick;\nexport interface ResizeNearestNeighborAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport 
const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport type ResizeNearestNeighborGradInputs =\n Pick;\nexport type ResizeNearestNeighborGradAttrs = ResizeNearestNeighborAttrs;\n\nexport const ResizeBilinear = 'ResizeBilinear';\nexport type ResizeBilinearInputs = Pick;\nexport interface ResizeBilinearAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport type ResizeBilinearGradInputs = Pick;\nexport type ResizeBilinearGradAttrs = ResizeBilinearAttrs;\n\nexport const Relu6 = 'Relu6';\nexport type Relu6Inputs = Pick;\n\nexport const Reverse = 'Reverse';\nexport type ReverseInputs = Pick;\nexport interface ReverseAttrs {\n dims: number|number[];\n}\n\nexport const Round = 'Round';\nexport type RoundInputs = UnaryInputs;\n\nexport const Rsqrt = 'Rsqrt';\nexport type RsqrtInputs = UnaryInputs;\n\nexport const ScatterNd = 'ScatterNd';\nexport type ScatterNdInputs = Pick;\nexport interface ScatterNdAttrs {\n shape: number[];\n}\n\nexport const Select = 'Select';\nexport type SelectInputs = Pick;\n\nexport const Selu = 'Selu';\nexport type SeluInputs = Pick;\n\nexport const Slice = 'Slice';\nexport type SliceInputs = Pick;\nexport interface SliceAttrs {\n begin: number|number[];\n size: number|number[];\n}\nexport const Sin = 'Sin';\nexport type SinInputs = UnaryInputs;\n\nexport const Sinh = 'Sinh';\nexport type SinhInputs = UnaryInputs;\n\nexport const Sign = 'Sign';\nexport type SignInputs = UnaryInputs;\n\nexport const Sigmoid = 'Sigmoid';\nexport type SigmoidInputs = UnaryInputs;\n\nexport const Softplus = 'Softplus';\nexport type SoftplusInputs = UnaryInputs;\n\nexport const Sqrt = 'Sqrt';\nexport type SqrtInputs = UnaryInputs;\n\nexport const Sum = 'Sum';\nexport type SumInputs = Pick;\nexport interface SumAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport type SpaceToBatchNDInputs = Pick;\nexport interface SpaceToBatchNDAttrs {\n blockShape: number[];\n paddings: number[][];\n}\n\nexport const SplitV = 'SplitV';\nexport type SplitVInputs = Pick;\nexport interface SplitVAttrs {\n numOrSizeSplits: number[]|number;\n axis: number;\n}\n\nexport const Softmax = 'Softmax';\nexport type SoftmaxInputs = Pick;\nexport interface SoftmaxAttrs {\n dim: number;\n}\n\nexport const SparseFillEmptyRows = 'SparseFillEmptyRows';\nexport type SparseFillEmptyRowsInputs =\n Pick;\n\nexport const SparseReshape = 'SparseReshape';\nexport type SparseReshapeInputs =\n Pick;\n\nexport const SparseToDense = 'SparseToDense';\nexport type SparseToDenseInputs =\n Pick;\nexport interface SparseToDenseAttrs {\n outputShape: number[];\n}\n\nexport const SquaredDifference = 'SquaredDifference';\nexport type SquaredDifferenceInputs = BinaryInputs;\n\nexport const Square = 'Square';\nexport type SquareInputs = Pick;\n\nexport const StridedSlice = 'StridedSlice';\nexport type StridedSliceInputs = Pick;\nexport interface StridedSliceAttrs {\n begin: number[];\n end: number[];\n strides: number[];\n beginMask: number;\n endMask: number;\n ellipsisMask: number;\n newAxisMask: number;\n shrinkAxisMask: number;\n}\n\nexport const Sub = 'Sub';\nexport type SubInputs = BinaryInputs;\n\nexport const Tan = 'Tan';\nexport type TanInputs = UnaryInputs;\n\nexport const Tanh = 'Tanh';\nexport type TanhInputs = UnaryInputs;\n\nexport const Tile = 'Tile';\nexport type TileInputs = Pick;\nexport interface TileAttrs {\n reps: number[];\n}\n\nexport const TopK = 
'TopK';\nexport type TopKInputs = Pick;\nexport interface TopKAttrs {\n k: number;\n sorted: boolean;\n}\n\nexport const Transform = 'Transform';\nexport type TransformInputs = Pick;\nexport interface TransformAttrs {\n interpolation: 'nearest'|'bilinear';\n fillMode: 'constant'|'reflect'|'wrap'|'nearest';\n fillValue: number;\n outputShape?: [number, number];\n}\n\nexport const Transpose = 'Transpose';\nexport type TransposeInputs = Pick;\nexport interface TransposeAttrs {\n perm: number[];\n}\n\nexport const Unique = 'Unique';\nexport type UniqueInputs = Pick;\nexport interface UniqueAttrs {\n axis: number;\n}\n\nexport type UnaryInputs = Pick;\n\nexport const Unpack = 'Unpack';\nexport type UnpackInputs = Pick;\nexport interface UnpackAttrs {\n axis: number;\n}\n\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport type UnsortedSegmentSumInputs =\n Pick;\nexport interface UnsortedSegmentSumAttrs {\n numSegments: number;\n}\n\nexport const ZerosLike = 'ZerosLike';\nexport type ZerosLikeInputs = UnaryInputs;\n\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport type StepInputs = UnaryInputs;\nexport interface StepAttrs {\n alpha: number;\n}\n\nexport const FromPixels = 'FromPixels';\nexport interface FromPixelsInputs {\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap;\n}\nexport interface FromPixelsAttrs {\n numChannels: number;\n}\n\nexport const RotateWithOffset = 'RotateWithOffset';\nexport type RotateWithOffsetInputs = Pick;\nexport interface RotateWithOffsetAttrs {\n radians: number;\n fillValue: number|[number, number, number];\n center: number|[number, number];\n}\n\nexport const _FusedMatMul = '_FusedMatMul';\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulInputs extends NamedTensorInfoMap {\n a: TensorInfo;\n b: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulAttrs {\n transposeA: boolean;\n transposeB: boolean;\n activation: Activation;\n leakyreluAlpha?: number;\n}\n\nexport const FusedConv2D = 'FusedConv2D';\nexport interface FusedConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n leakyreluAlpha?: number;\n}\n\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\nexport interface FusedDepthwiseConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedDepthwiseConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n leakyreluAlpha?: number;\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
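The kernel-names file above follows one convention throughout: a string constant per kernel, an inputs type built with `Pick` over `NamedTensorInfoMap`, and an attrs interface for the non-tensor parameters. A sketch of the same pattern applied to a hypothetical `MyScale` kernel (not a real tf.js kernel):

```ts
import {NamedTensorInfoMap} from './kernel_registry';

export const MyScale = 'MyScale';
export type MyScaleInputs = Pick<NamedTensorInfoMap, 'x'>;
export interface MyScaleAttrs {
  factor: number;  // element-wise scalar multiplier
}
```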
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from './environment';\n\nimport {getGlobal} from './global_util';\nimport {NamedGradientMap} from './tape';\nimport {Tensor} from './tensor';\nimport {DataType, RecursiveArray} from './types';\n\nconst kernelRegistry =\n getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry =\n getGlobal('gradRegistry', () => new Map());\n\nexport type DataId = object;\n\ntype AttributeValue =\n number|number[]|boolean|boolean[]|string|string[]|NamedAttrMap;\n\n/** These are extra non-tensor/primitive params passed to kernel functions. */\nexport type Attribute = AttributeValue|RecursiveArray;\n\n/** Specifies the code to run when executing a kernel. */\nexport type KernelFunc = (params: {\n inputs: NamedTensorInfoMap,\n backend: {},\n attrs?: NamedAttrMap,\n}) => TensorInfo|TensorInfo[];\n\n/** The function to run when computing a gradient during backprop. */\nexport type GradFunc =\n (dy: Tensor|Tensor[], saved: Tensor[], attrs: NamedAttrMap) =>\n NamedGradientMap;\n\n/** Function that gets called after the backend initializes. */\nexport type KernelSetupFunc = (backend: {}) => void;\n/** Function that gets called right before the backend is disposed. */\nexport type KernelDisposeFunc = KernelSetupFunc;\n\n/** Config object for registering a kernel in the global registry. */\nexport interface KernelConfig {\n kernelName: string;\n backendName: string;\n kernelFunc: KernelFunc;\n setupFunc?: KernelSetupFunc;\n disposeFunc?: KernelDisposeFunc;\n}\n\n/** Config object for registering a gradient in the global registry. */\nexport interface GradConfig {\n kernelName: string;\n inputsToSave?: string[];\n // When saveAllInputs is true, all inputs will be saved. Only use this flag\n // if inputs is an array of Tensors.\n saveAllInputs?: boolean;\n outputsToSave?: boolean[];\n gradFunc: GradFunc;\n}\n\n/** Holds metadata for a given tensor. 
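`KernelConfig` and `GradConfig` above are the two registration payloads. A hedged sketch of a `GradConfig` for the hypothetical `MyScale` kernel, where the gradient of `factor * x` with respect to `x` is `factor * dy` (`mul` here is the chained op that the ops package attaches to `Tensor`, not something defined in this file):

```ts
import {GradConfig} from './kernel_registry';
import {Tensor} from './tensor';

const myScaleGradConfig: GradConfig = {
  kernelName: 'MyScale',   // hypothetical kernel name
  inputsToSave: ['x'],
  gradFunc: (dy, saved, attrs) => {
    const factor = attrs['factor'] as number;
    // One lazy thunk per input; the engine evaluates it inside tidy().
    return { x: () => (dy as Tensor).mul(factor) };
  },
};
```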
*/\nexport interface TensorInfo {\n dataId: DataId;\n shape: number[];\n dtype: DataType;\n}\n\nexport interface NamedTensorInfoMap {\n [name: string]: TensorInfo;\n}\n\nexport interface NamedAttrMap {\n [name: string]: Attribute;\n}\n\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(\n kernelName: string, backendName: string): KernelConfig {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName: string): GradConfig {\n return gradRegistry.get(kernelName);\n}\n\nexport function getKernelsForBackend(backendName: string): KernelConfig[] {\n const it = kernelRegistry.entries();\n const result: KernelConfig[] = [];\n\n while (true) {\n const {done, value} = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend, ] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config: KernelConfig) {\n const {kernelName, backendName} = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n console.warn(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config: GradConfig) {\n const {kernelName} = config;\n\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n console.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(\n kernelName: string, backendName: string): void {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n\n/** Removes the registered gradient from the global registry. 
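Registration and lookup round-trip as implemented above. A minimal sketch with an invented identity kernel; the lookup key format `${backendName}_${kernelName}` comes from `makeKey`, defined at the end of this file:

```ts
import {getKernel, registerKernel} from './kernel_registry';

registerKernel({
  kernelName: 'MyScale',      // hypothetical
  backendName: 'my-backend',  // hypothetical
  // Identity body: a real kernel would read inputs.x through the backend
  // and produce a new TensorInfo for the result.
  kernelFunc: ({ inputs }) => inputs['x'],
});

console.log(getKernel('MyScale', 'my-backend') != null);  // true
```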
*/\nexport function unregisterGradient(kernelName: string): void {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(\n `The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(\n registeredBackendName: string, newBackendName: string): void {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig =\n Object.assign({}, kernelConfig, {backendName: newBackendName});\n registerKernel(newKernelConfig);\n });\n}\n\nfunction makeKey(kernelName: string, backendName: string) {\n return `${backendName}_${kernelName}`;\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from './environment';\nimport {BackendValues, DataType, TensorLike, TypedArray} from './types';\nimport * as base from './util_base';\nexport * from './util_base';\n\n/**\n * Create typed array for scalar value. Used for storing in `DataStorage`.\n */\nexport function createScalarValue(\n value: DataType, dtype: DataType): BackendValues {\n if (dtype === 'string') {\n return encodeString(value);\n }\n\n return toTypedArray([value], dtype);\n}\n\nfunction noConversionNeeded(a: TensorLike, dtype: DataType): boolean {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\n\nexport function toTypedArray(a: TensorLike, dtype: DataType): TypedArray {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = base.flatten(a);\n }\n\n if (env().getBool('DEBUG')) {\n base.checkConversionForErrors(a as number[], dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a as TypedArray;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a as number[]);\n } else if (dtype === 'int32') {\n return new Int32Array(a as number[]);\n } else if (dtype === 'bool') {\n const bool = new Uint8Array((a as number[]).length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round((a as number[])[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. 
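`copyRegisteredKernels` above is the hook for custom backends: it re-registers every kernel from an existing backend under a new backend name, changing only the `backendName` field of each config. A sketch with an invented target name:

```ts
import {copyRegisteredKernels, getKernelsForBackend} from './kernel_registry';

// Everything registered for 'cpu' is re-registered for 'my-backend'.
copyRegisteredKernels('cpu', 'my-backend');
console.log(getKernelsForBackend('my-backend').length > 0);
```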
It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now(): number {\n return env().platform.now();\n}\n\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(\n path: string, requestInits?: RequestInit): Promise {\n return env().platform.fetch(path, requestInits);\n}\n\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s: string, encoding = 'utf-8'): Uint8Array {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes: Uint8Array, encoding = 'utf-8'): string {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimer, BackendTimingInfo} from './backends/backend';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport {DataType, DataTypeMap, TypedArray} from './types';\nimport * as util from './util';\n\nexport type KernelProfile = {\n kernelName: string,\n outputs: Tensor[],\n inputs: NamedTensorMap,\n timeMs: Promise,\n extraInfo: Promise\n};\n\nexport class Profiler {\n constructor(private backendTimer: BackendTimer, private logger?: Logger) {\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n\n profileKernel(kernelName: string, inputs: NamedTensorMap, f: () => Tensor[]):\n KernelProfile {\n let outputs: Tensor[];\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n let timer: Promise;\n const start = util.now();\n if (this.backendTimer.timerAvailable()) {\n timer = this.backendTimer.time(holdResultWrapperFn);\n } else {\n holdResultWrapperFn();\n for (const output of outputs) {\n output.dataSync();\n }\n timer = Promise.resolve({kernelMs: util.now() - start});\n }\n if (env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {\n for (let i = 0; i < outputs.length; i++) {\n const output = 
outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n }\n\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(\n timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n\n logKernelProfile(kernelProfile: KernelProfile): void {\n const {kernelName, outputs, timeMs, inputs, extraInfo} = kernelProfile;\n\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(\n kernelName, result, valueContainer[0], valueContainer[1], inputs,\n valueContainer[2]);\n });\n });\n }\n}\n\nexport function checkComputationForErrors(\n vals: DataTypeMap[D], dtype: D, kernelName: string): boolean {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\n\nexport class Logger {\n logKernelProfile(\n name: string, result: Tensor, vals: TypedArray,\n timeMs: number|{error: string}, inputs: NamedTensorMap,\n extraInfo?: string) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n\n console.log(\n `%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${\n inputShapesDescription}\\t%c${extraInfo}`,\n 'font-weight:bold', 'color:red', 'color:blue', 'color: orange',\n 'color: green', 'color: steelblue');\n }\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
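`checkComputationForErrors` above only scans `float32` results, since only floating-point kernels can produce `NaN` or `Infinity`. A small sketch of what the DEBUG-mode check reports (values invented):

```ts
import {checkComputationForErrors} from './profiler';

const vals = new Float32Array([1, NaN, 3]);
// Warns "Found NaN in the result of 'MyScale'" and returns true.
const bad = checkComputationForErrors(vals, 'float32', 'MyScale');

// Non-float dtypes are skipped entirely:
const ok = checkComputationForErrors(new Int32Array([1, 2]), 'int32', 'MyScale');
console.log(bad, ok);  // true false
```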
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport * as util from './util';\n\nexport interface TapeNode {\n id: number;\n kernelName: string;\n outputs: Tensor[];\n inputs: NamedTensorMap;\n // Optional params, defined only for ops with gradient impl.\n gradient?: (dys: Tensor[]) => NamedGradientMap;\n saved?: Tensor[];\n}\n\nexport type NamedGradientMap = {\n [inputName: string]: () => Tensor;\n};\n\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(\n tape: TapeNode[], xs: Tensor[], y: Tensor): TapeNode[] {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX: {[tensorId: number]: boolean} = {};\n const nodesFromX: {[nodeId: number]: boolean} = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n\n if (anyInputFromX) {\n break;\n }\n }\n }\n\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY: {[tensorId: number]: boolean} = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY: {[nodeId: number]: boolean} = {};\n\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n\n // Return the paths that come from x and lead to y.\n const filteredTape: TapeNode[] = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs: {[inputName: string]: Tensor} = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n 
prunedNode.outputs = node.outputs;\n\n filteredTape.push(prunedNode);\n }\n }\n\n return filteredTape;\n}\n\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(\n tensorAccumulatedGradientMap: {[tensorId: number]: Tensor},\n filteredTape: TapeNode[], tidy: (f: Function) => Tensor,\n add: (a: Tensor, b: Tensor) => Tensor) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n\n const dys: Tensor[] = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n } else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n\n if (node.gradient == null) {\n throw new Error(\n `Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(\n `Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n } else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
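The accumulation step in `backpropagateGradients` above is what makes multiple uses of one tensor sum their gradient contributions. A standalone sketch of just that step, with `add` standing in for the injected `add()` callback:

```ts
import {Tensor} from './tensor';

function accumulateGradient(
    gradMap: { [tensorId: number]: Tensor }, x: Tensor, dx: Tensor,
    add: (a: Tensor, b: Tensor) => Tensor): void {
  if (gradMap[x.id] == null) {
    gradMap[x.id] = dx;             // first contribution: keep as-is
  } else {
    const prev = gradMap[x.id];
    gradMap[x.id] = add(prev, dx);  // later contributions: sum them
    prev.dispose();                 // free the superseded partial sum
  }
}
```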
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TypedArray} from './types';\nimport {computeStrides, isString, rightPad, sizeFromShape} from './util';\n\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\n\nexport function tensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n verbose: boolean) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\n\nfunction computeMaxSizePerColumn(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[]): number[] {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples =\n dtype === 'complex64' ? createComplexTuples(vals) : vals;\n\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(\n padPerCol[j],\n valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\n\nfunction valToString(\n val: number|string|[number, number], pad: number, dtype: DataType) {\n let valStr: string;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n } else if (isString(val)) {\n valStr = `'${val}'`;\n } else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n } else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n\n return rightPad(valStr, pad);\n}\n\nfunction boolNumToString(v: number): string {\n return v === 0 ? 'false' : 'true';\n}\n\nfunction subTensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[], padPerCol: number[], isLast = true): string[] {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0] as number)];\n }\n return [vals[0].toString()];\n }\n\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n\n let firstVals = Array.from(\n vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice(\n (size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement,\n size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map(\n (x, i) => valToString(\n x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals: Array =\n dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines: string[] = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n } else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\n\nfunction createComplexTuples(vals: Array<{}>|\n TypedArray): Array<[number, number]> {\n const complexTuples: Array<[number, number]> = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]] as [number, number]);\n }\n return complexTuples;\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. 
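`tensorToString` above elides long tensors: past `FORMAT_LIMIT_NUM_VALS` (20) values in a dimension, only the first and last `FORMAT_NUM_FIRST_LAST_VALS` (3) are printed. Sketch:

```ts
import {tensorToString} from './tensor_format';

const vals = new Float32Array(25).map((_, i) => i);
console.log(tensorToString(vals, [25], 'float32', /* verbose */ false));
// Tensor
//     [0, 1, 2, ..., 22, 23, 24]
```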
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlobal} from './global_util';\nimport {tensorToString} from './tensor_format';\nimport {ArrayMap, BackendValues, DataType, DataTypeMap, DataValues, NumericDataType, Rank, ShapeMap, SingleValueMap, TypedArray} from './types';\nimport * as util from './util';\nimport {computeStrides, toNestedArray} from './util';\n\nexport interface TensorData {\n dataId?: DataId;\n values?: DataTypeMap[D];\n}\n\n// This interface mimics KernelBackend (in backend.ts), which would create a\n// circular dependency if imported.\nexport interface Backend {}\n\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n size: number;\n shape: ShapeMap[R];\n strides: number[];\n values: DataTypeMap[D];\n\n constructor(shape: ShapeMap[R], public dtype: D, values?: DataTypeMap[D]) {\n this.shape = shape.slice() as ShapeMap[R];\n this.size = util.sizeFromShape(shape);\n\n if (values != null) {\n const n = values.length;\n util.assert(\n n === this.size,\n () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value: SingleValueMap[D], ...locs: number[]): void {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(\n locs.length === this.rank,\n () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n\n const index = this.locToIndex(locs);\n this.values[index] = value as number;\n }\n\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs: number[]): SingleValueMap[D] {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index] as SingleValueMap[D];\n }\n\n locToIndex(locs: number[]): number {\n if (this.rank === 0) {\n return 0;\n } else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n\n indexToLoc(index: number): number[] {\n if (this.rank === 0) {\n return [];\n } else if (this.rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n\n get rank() {\n return this.shape.length;\n }\n\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor(): Tensor {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype) as\n Tensor;\n }\n}\n\nexport interface TensorTracker {\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: Backend): Tensor;\n makeVariable(\n initialValue: Tensor, trainable?: boolean, name?: string,\n dtype?: DataType): Variable;\n incRef(a: Tensor, backend: Backend): void;\n disposeTensor(t: Tensor): void;\n disposeVariable(v: Variable): void;\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n}\n\n/**\n * The Tensor class calls into this handler to delegate chaining operations.\n */\nexport interface OpHandler {\n cast(x: T, dtype: DataType): T;\n buffer(\n shape: ShapeMap[R], dtype: D,\n values?: DataTypeMap[D]): TensorBuffer;\n print(x: T, verbose: boolean): void;\n clone(x: T): T;\n // TODO(yassogba) bring reshape back?\n}\n\n// For tracking tensor creation and disposal.\nlet trackerFn: () => TensorTracker = null;\n// Used by chaining methods to call into ops.\nlet opHandler: OpHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn: (msg: string) => void = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn: () => TensorTracker) {\n trackerFn = fn;\n}\n\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler: OpHandler) {\n opHandler = handler;\n}\n\n/**\n * Sets the deprecation warning function to be used by this file. 
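`TensorBuffer` above stores values flat and converts between locations and indices with row-major strides: for shape `[2, 3]` the strides are `[3, 1]`, so location `[1, 2]` maps to flat index `1*3 + 2 = 5`. A sketch (constructed directly here for illustration; application code would normally go through `tf.buffer`):

```ts
import {TensorBuffer} from './tensor';

const buf = new TensorBuffer([2, 3], 'float32');
buf.set(42, 1, 2);                    // write at row 1, column 2
console.log(buf.get(1, 2));           // 42
console.log(buf.locToIndex([1, 2]));  // 5
console.log(buf.indexToLoc(5));       // [1, 2]
```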
This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn: (msg: string) => void) {\n deprecationWarningFn = fn;\n}\n\n/**\n * We wrap data id since we use weak map to avoid memory leaks.\n * Since we have our own memory management, we have a reference counter\n * mapping a tensor to its data, so there is always a pointer (even if that\n * data is otherwise garbage collectable).\n * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/\n * Global_Objects/WeakMap\n */\nexport type DataId = object; // object instead of {} to force non-primitive.\n\n// Declare this namespace to make Tensor class augmentation work in google3.\nexport declare namespace Tensor {}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * For performance reasons, functions that create tensors do not necessarily\n * perform a copy of the data passed to them (e.g. if the data is passed as a\n * `Float32Array`), and changes to the data will change the tensor. This is not\n * a feature and is not supported. To avoid this behavior, use the tensor before\n * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n /** Unique id of this tensor. */\n readonly id: number;\n /**\n * Id of the bucket holding the data for this tensor. Multiple arrays can\n * point to the same bucket (e.g. when calling array.reshape()).\n */\n dataId: DataId;\n /** The shape of the tensor. */\n readonly shape: ShapeMap[R];\n /** Number of elements in the tensor. */\n readonly size: number;\n /** The data type for the array. */\n readonly dtype: DataType;\n /** The rank type for the array (see `Rank` enum). */\n readonly rankType: R;\n\n /** Whether this tensor has been globally kept. */\n kept = false;\n /** The id of the scope this tensor is being tracked in. */\n scopeId: number;\n\n /**\n * Number of elements to skip in each dimension when indexing. See\n * https://docs.scipy.org/doc/numpy/reference/generated/\\\n * numpy.ndarray.strides.html\n */\n readonly strides: number[];\n\n constructor(shape: ShapeMap[R], dtype: DataType, dataId: DataId, id: number) {\n this.shape = shape.slice() as ShapeMap[R];\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher') as R;\n }\n\n get rank(): number {\n return this.shape.length;\n }\n\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer(): Promise> {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype as D, vals);\n }\n\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync(): TensorBuffer {\n return opHandler.buffer(this.shape, this.dtype as D, this.dataSync());\n }\n\n /**\n * Returns the tensor data as a nested array. 
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array(): Promise {\n const vals = await this.data();\n return toNestedArray(this.shape, vals, this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync(): ArrayMap[R] {\n return toNestedArray(\n this.shape, this.dataSync(), this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data(): Promise {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data as Uint8Array[];\n try {\n return bytes.map(b => util.decodeString(b)) as DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as Promise;\n }\n\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync(): DataTypeMap[D] {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return (data as Uint8Array[]).map(b => util.decodeString(b)) as\n DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as DataTypeMap[D];\n }\n\n /** Returns the underlying bytes of the tensor's data. */\n async bytes(): Promise {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data as Uint8Array[];\n } else {\n return new Uint8Array((data as TypedArray).buffer);\n }\n }\n\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose(): void {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n\n protected isDisposedInternal = false;\n get isDisposed(): boolean {\n return this.isDisposedInternal;\n }\n\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false): void {\n return opHandler.print(this, verbose);\n }\n\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone(this: T): T {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n\n /**\n * Returns a human-readable description of the tensor. 
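The accessors above differ only in blocking behavior and output shape; a short sketch (inside an `async` function, assuming the same `tf` namespace):

```ts
const t = tf.tensor2d([[1, 2], [3, 4]]);

const flat = await t.data();    // Float32Array [1, 2, 3, 4], non-blocking
const nested = await t.array(); // [[1, 2], [3, 4]]
const sync = t.dataSync();      // same values, but blocks the UI thread

t.dispose();                    // returns the memory to the backend
```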
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false): string {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n\n cast(dtype: DataType): T {\n this.throwIfDisposed();\n return opHandler.cast(this as T, dtype);\n }\n variable(trainable = true, name?: string, dtype?: DataType): Variable {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype) as\n Variable;\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance: Tensor) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n\nexport function getGlobalTensorClass() {\n // Use getGlobal so that we can augment the Tensor class across package\n // boundaries becase the node resolution alg may result in different modules\n // being returned for this file depending on the path they are loaded from.\n return getGlobal('Tensor', () => {\n return Tensor;\n });\n}\n\n// Global side effect. Cache global reference to Tensor class\ngetGlobalTensorClass();\n\nexport interface NumericTensor extends Tensor {\n dtype: NumericDataType;\n dataSync(): DataTypeMap[D];\n data(): Promise;\n}\n\nexport interface StringTensor extends Tensor {\n dtype: 'string';\n dataSync(): DataTypeMap[D];\n data(): Promise;\n}\n\n/** @doclink Tensor */\nexport type Scalar = Tensor;\n/** @doclink Tensor */\nexport type Tensor1D = Tensor;\n/** @doclink Tensor */\nexport type Tensor2D = Tensor;\n/** @doclink Tensor */\nexport type Tensor3D = Tensor;\n/** @doclink Tensor */\nexport type Tensor4D = Tensor;\n/** @doclink Tensor */\nexport type Tensor5D = Tensor;\n/** @doclink Tensor */\nexport type Tensor6D = Tensor;\n\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n name: string;\n\n constructor(\n initialValue: Tensor, public trainable: boolean, name: string,\n tensorId: number) {\n super(\n initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.name = name;\n }\n\n /**\n * Assign a new `tf.Tensor` to this variable. 
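The `Symbol.hasInstance` override above is what keeps `instanceof tf.Tensor` working when a bundler duplicates the class; a sketch of it together with the chaining methods:

```ts
const a = tf.tensor1d([1.5, 2.5]);
const b = a.cast('int32');            // routed through the registered op handler
const c = a.clone();                  // new handle, same underlying data id
console.log(b instanceof tf.Tensor);  // true, even across duplicated bundles
b.print(/* verbose= */ true);         // verbose output includes dtype and size
```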
The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue: Tensor): void {\n if (newValue.dtype !== this.dtype) {\n throw new Error(\n `dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(\n `shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n\n dispose(): void {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\n\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance: Variable) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {TensorContainer, TensorContainerArray} from './tensor_types';\nimport {upcastType} from './types';\nimport {assert} from './util';\n\nexport function makeTypesMatch(a: T, b: T): [T, T] {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\n\nexport function assertTypesMatch(a: Tensor, b: Tensor): void {\n assert(\n a.dtype === b.dtype,\n () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\n\nexport function isTensorInList(tensor: Tensor, tensorList: Tensor[]): boolean {\n return tensorList.some(x => x.id === tensor.id);\n}\n\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. 
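`assign` swaps the variable's data id in place, which is why shape and dtype must match the previous value; a minimal sketch:

```ts
const v = tf.variable(tf.zeros([2]));       // trainable by default
v.assign(tf.tensor1d([1, 2]));              // ok: same shape and dtype
// v.assign(tf.tensor1d([1, 2, 3]));        // throws: shape mismatch
// v.assign(tf.tensor1d([1, 2], 'int32'));  // throws: dtype mismatch
v.dispose();                                // also unregisters the variable
```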
If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result: TensorContainer): Tensor[] {\n const list: Tensor[] = [];\n const seen = new Set<{}|void>();\n walkTensorContainer(result, list, seen);\n return list;\n}\n\nfunction walkTensorContainer(\n container: TensorContainer, list: Tensor[], seen: Set<{}|void>): void {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container as TensorContainerArray;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n\n// tslint:disable-next-line:no-any\nfunction isIterable(obj: any): boolean {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** @docalias number[] */\nexport interface ShapeMap {\n R0: number[];\n R1: [number];\n R2: [number, number];\n R3: [number, number, number];\n R4: [number, number, number, number];\n R5: [number, number, number, number, number];\n R6: [number, number, number, number, number, number];\n}\n\n/** @docalias number[] */\nexport interface ArrayMap {\n R0: number;\n R1: number[];\n R2: number[][];\n R3: number[][][];\n R4: number[][][][];\n R5: number[][][][][];\n R6: number[][][][][][];\n}\n\nexport interface DataTypeMap {\n float32: Float32Array;\n int32: Int32Array;\n bool: Uint8Array;\n complex64: Float32Array;\n string: string[];\n}\n\nexport interface SingleValueMap {\n bool: boolean;\n int32: number;\n float32: number;\n complex64: number;\n string: string;\n}\n\n/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */\nexport type DataType = keyof DataTypeMap;\nexport type NumericDataType = 'float32'|'int32'|'bool'|'complex64';\nexport type TypedArray = Float32Array|Int32Array|Uint8Array;\n/** Tensor data used in tensor creation and user-facing API. */\nexport type DataValues = DataTypeMap[DataType];\n/** The underlying tensor data that gets stored in a backend. */\nexport type BackendValues = Float32Array|Int32Array|Uint8Array|Uint8Array[];\n\nexport enum Rank {\n R0 = 'R0',\n R1 = 'R1',\n R2 = 'R2',\n R3 = 'R3',\n R4 = 'R4',\n R5 = 'R5',\n R6 = 'R6'\n}\n\nexport type FlatVector = boolean[]|number[]|TypedArray;\nexport type RegularArray =\n T[]|T[][]|T[][][]|T[][][][]|T[][][][][]|T[][][][][][];\n\n// tslint:disable-next-line:no-any\nexport interface RecursiveArray {\n [index: number]: T|RecursiveArray;\n}\n\n// Looks for upcasting types. 
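`getTensorsInContainer` is what lets the public `tf.dispose` accept arbitrary nests of tensors; an illustrative sketch:

```ts
const pack = {
  logits: tf.tensor1d([0.1, 0.9]),
  history: [tf.scalar(1), tf.scalar(2)],
  label: 'batch-0',  // non-tensor leaves are ignored by the walker
};
tf.dispose(pack);    // walks the container and disposes all three tensors
```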
Used, for example, in operations with mixed dtype\n// inputs.\nenum UpcastInt32AndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'int32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastBoolAndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'bool',\n 'complex64' = 'complex64'\n}\n\nenum UpcastFloat32AndMap {\n 'float32' = 'float32',\n 'int32' = 'float32',\n 'bool' = 'float32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastComplex64AndMap {\n 'float32' = 'complex64',\n 'int32' = 'complex64',\n 'bool' = 'complex64',\n 'complex64' = 'complex64'\n}\n\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\n\nexport function upcastType(typeA: DataType, typeB: DataType): DataType {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n\n/** Returns the output type after summation. */\nexport function sumOutType(type: DataType): DataType {\n return upcastType(type, 'int32');\n}\n\n/** @docalias TypedArray|Array */\nexport type TensorLike =\n TypedArray|number|boolean|string|RecursiveArray|\n RecursiveArray|RecursiveArray|Uint8Array[];\nexport type ScalarLike = number|boolean|string|Uint8Array;\n/** @docalias TypedArray|Array */\nexport type TensorLike1D = TypedArray|number[]|boolean[]|string[]|Uint8Array[];\n/** @docalias TypedArray|Array */\nexport type TensorLike2D = TypedArray|number[]|number[][]|boolean[]|boolean[][]|\n string[]|string[][]|Uint8Array[]|Uint8Array[][];\n/** @docalias TypedArray|Array */\nexport type TensorLike3D = TypedArray|number[]|number[][][]|boolean[]|\n boolean[][][]|string[]|string[][][]|Uint8Array[]|Uint8Array[][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike4D = TypedArray|number[]|number[][][][]|boolean[]|\n boolean[][][][]|string[]|string[][][][]|Uint8Array[]|Uint8Array[][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike5D =\n TypedArray|number[]|number[][][][][]|boolean[]|boolean[][][][][]|string[]|\n string[][][][][]|Uint8Array[]|Uint8Array[][][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike6D =\n TypedArray|number[]|number[][][][][][]|boolean[]|boolean[][][][][][]|\n string[]|string[][][][][][]|Uint8Array[]|Uint8Array[][][][][];\n\n/** Type for representing image data in Uint8Array type. */\nexport interface PixelData {\n width: number;\n height: number;\n data: Uint8Array;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
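The upcast tables above surface in every mixed-dtype op via `makeTypesMatch`; the observable behavior, assuming the public ops:

```ts
const i = tf.tensor1d([1, 2], 'int32');
const f = tf.tensor1d([0.5, 0.5], 'float32');
console.log(tf.add(i, f).dtype);  // 'float32' — int32 upcasts to float32

const b = tf.tensor1d([1, 0], 'bool');
console.log(tf.add(b, i).dtype);  // 'int32' — bool upcasts to int32
console.log(tf.sum(b).dtype);     // 'int32' — sumOutType(t) = upcastType(t, 'int32')
```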
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimingInfo, DataMover, KernelBackend} from './backends/backend';\nimport {Environment, setEnvironmentGlobal} from './environment';\nimport {getGlobalNamespace} from './global_util';\nimport {Add, Cast, Identity} from './kernel_names';\nimport {getGradient, getKernel, getKernelsForBackend, GradFunc, NamedAttrMap, TensorInfo} from './kernel_registry';\nimport {KernelProfile, Profiler} from './profiler';\nimport {backpropagateGradients, getFilteredNodesXToY, TapeNode} from './tape';\nimport {DataId, setTensorTracker, Tensor, TensorTracker, Variable} from './tensor';\nimport {GradSaveFunc, NamedTensorMap, NamedVariableMap, TensorContainer} from './tensor_types';\nimport {getTensorsInContainer} from './tensor_util';\nimport {BackendValues, DataType, DataValues} from './types';\nimport * as util from './util';\nimport {bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape} from './util';\n\n/**\n * A function that computes an output. The save function is for saving tensors\n * computed in the forward pass, that we need in the backward pass.\n */\nexport type ForwardFunc = (backend: KernelBackend, save?: GradSaveFunc) => T;\n\n/**\n * @docalias (a: Tensor, b: Tensor,..., save?: Function) => {\n * value: Tensor,\n * gradFunc: (dy: Tensor, saved?: NamedTensorMap) => Tensor | Tensor[]\n * }\n */\nexport type CustomGradientFunc =\n (...inputs: Array) => {\n value: T;\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[];\n };\n\nexport type MemoryInfo = {\n numTensors: number; numDataBuffers: number; numBytes: number;\n unreliable?: boolean; reasons: string[];\n};\n\ntype KernelInfo = {\n name: string; bytesAdded: number; totalBytesSnapshot: number;\n tensorsAdded: number;\n totalTensorsSnapshot: number;\n inputShapes: number[][];\n outputShapes: number[][];\n kernelTimeMs: number | {error: string} | Promise;\n extraInfo: string | Promise;\n};\n\nexport type ProfileInfo = {\n newBytes: number; newTensors: number; peakBytes: number;\n kernels: KernelInfo[];\n result: TensorContainer;\n kernelNames: string[];\n};\n\nexport interface TimingInfo extends BackendTimingInfo {\n wallMs: number;\n}\n\n/** @docalias Function */\nexport type ScopeFn = () => T;\n\ninterface ScopeState {\n track: Tensor[];\n name: string;\n id: number;\n}\n\ninterface RegisteredKernelInvocation {\n kernelName: string;\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\ninterface CustomGradKernelInvocation {\n forwardFunc: ForwardFunc;\n backwardsFunc: (dy: T, saved: Tensor[]) => {\n [P in keyof I]: () => I[P]\n };\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\nfunction isRegisteredKernelInvocation(\n kernelInvocation: RegisteredKernelInvocation|\n CustomGradKernelInvocation):\n kernelInvocation is RegisteredKernelInvocation {\n return (kernelInvocation as RegisteredKernelInvocation).kernelName != null;\n}\n\nclass EngineState {\n // Public since 
optimizers will use it.\n registeredVariables: NamedVariableMap = {};\n\n nextTapeNodeId = 0;\n numBytes = 0;\n numTensors = 0;\n numStringTensors = 0;\n numDataBuffers = 0;\n\n activeTape: TapeNode[];\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n kernelDepth = 0;\n\n // Keep Tensors that parallel the tapes.\n activeScope: ScopeState;\n scopeStack: ScopeState[] = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n numDataMovesStack: number[] = [];\n nextScopeId = 0;\n\n tensorInfo = new WeakMap();\n\n profiling = false;\n activeProfile: ProfileInfo = {\n newBytes: 0,\n newTensors: 0,\n peakBytes: 0,\n kernels: [],\n result: null,\n get kernelNames():\n string[] {\n return Array.from(new Set(this.kernels.map(k => k.name)));\n }\n };\n\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\n\nexport class Engine implements TensorTracker, DataMover {\n state: EngineState;\n backendName: string;\n registry: {[id: string]: KernelBackend} = {};\n registryFactory: {\n [id: string]: {\n factory: () => KernelBackend | Promise,\n priority: number\n }\n } = {};\n\n private profiler: Profiler;\n private backendInstance: KernelBackend;\n private pendingBackendInit: Promise;\n private pendingBackendInitId = 0;\n\n constructor(public ENV: Environment) {\n this.state = new EngineState();\n }\n\n async ready(): Promise {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => {});\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n get backend(): KernelBackend {\n if (this.pendingBackendInit != null) {\n throw new Error(\n `Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const {name, asyncInit} = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(\n `The highest priority backend '${name}' has not yet been ` +\n `initialized. 
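Because a registered backend factory may return a promise, user code should gate on `ready()` before touching the `backend` getter; a minimal sketch:

```ts
await tf.ready();              // resolves once some registered backend initializes
console.log(tf.getBackend());  // the highest-priority backend that succeeded
```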
Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n\n backendNames(): string[] {\n return Object.keys(this.registryFactory);\n }\n\n findBackend(backendName: string): KernelBackend {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const {asyncInit} = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n } else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n\n findBackendFactory(backendName: string):\n () => KernelBackend | Promise {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n\n registerBackend(\n backendName: string,\n factory: () => KernelBackend | Promise,\n priority = 1): boolean {\n if (backendName in this.registryFactory) {\n console.warn(\n `${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = {factory, priority};\n return true;\n }\n\n async setBackend(backendName: string): Promise {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const {success, asyncInit} = this.initializeBackend(backendName);\n const result = asyncInit ? await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n\n return true;\n }\n\n private setupRegisteredKernels(): void {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n\n private disposeRegisteredKernels(backendName: string): void {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n private initializeBackend(backendName: string):\n {success: boolean|Promise, asyncInit: boolean} {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(\n `Cannot initialize backend ${backendName}, no registration found.`);\n }\n\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend) &&\n typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success =\n backend\n .then(backendInstance => {\n // Outdated promise. 
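A sketch of the registration flow above; `FakeKernelBackend` and its `init()` are hypothetical placeholders for a real `tf.KernelBackend` subclass:

```ts
tf.registerBackend('fake', async () => {
  const backend = new FakeKernelBackend();  // hypothetical backend class
  await backend.init();                     // async factories are awaited lazily
  return backend;
}, /* priority */ 100);

const ok = await tf.setBackend('fake');     // false if initialization failed
```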
Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n console.warn(\n `Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return {success, asyncInit: true};\n } else {\n this.registry[backendName] = backend as KernelBackend;\n return {success: true, asyncInit: false};\n }\n } catch (err) {\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return {success: false, asyncInit: false};\n }\n }\n\n removeBackend(backendName: string): void {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n\n delete this.registryFactory[backendName];\n\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n\n private getSortedBackends(): string[] {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a: string, b: string) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n\n private initializeBackendsAndReturnBest():\n {name: string, asyncInit: boolean} {\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const {success, asyncInit} = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return {name: backendName, asyncInit};\n }\n }\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n moveData(backend: KernelBackend, dataId: DataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n const refCount = srcBackend.refCount(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId, true);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype, refCount);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n\n tidy(nameOrFn: string|ScopeFn, fn?: ScopeFn):\n T {\n let name: string = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n } else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error(\n 
'When calling with two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error(\n 'When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn as string;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result: T;\n return this.scopedRun(\n () => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n\n private scopedRun(start: () => void, end: () => void, f: () => T): T {\n start();\n try {\n const res = f();\n end();\n return res;\n } catch (ex) {\n end();\n throw ex;\n }\n }\n\n private static nextTensorId = 0;\n private nextTensorId(): number {\n return Engine.nextTensorId++;\n }\n\n private static nextVariableId = 0;\n private nextVariableId(): number {\n return Engine.nextVariableId++;\n }\n\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n */\n private clone(x: Tensor): Tensor {\n const y: Tensor = ENGINE.runKernel(Identity, {x} as {} as NamedTensorMap);\n const inputs = {x};\n const grad = (dy: Tensor) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = {x: dy};\n const attrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, gradInputs as {} as NamedTensorMap,\n // tslint:disable-next-line: no-unnecessary-type-assertion\n attrs as {} as NamedAttrMap) as Tensor;\n }\n });\n const saved: Tensor[] = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(\n kernelName: string, inputs: NamedTensorMap, attrs?: NamedAttrMap): T {\n const hasKernel = getKernel(kernelName, this.backendName) != null;\n if (!hasKernel) {\n throw new Error(`Kernel '${kernelName}' not registered for backend '${\n this.backendName}'`);\n }\n return this.runKernelFunc({kernelName, inputs, attrs});\n }\n\n private shouldCheckForMemLeaks(): boolean {\n return this.ENV.getBool('IS_TEST');\n }\n\n private checkKernelForMemLeak(\n kernelName: string, numDataIdsBefore: number,\n outInfos: TensorInfo[]): void {\n const numDataIdsAfter = this.backend.numDataIds();\n\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n\n // Account for the number of moves during kernel execution. 
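A sketch of the scope semantics `tidy` implements through `startScope`/`endScope`:

```ts
const result = tf.tidy('square-twice', () => {
  const a = tf.scalar(2);
  const b = a.square();  // intermediate: disposed when the scope ends
  return b.square();     // returned: re-tracked in the parent scope
});
// Tensors wrapped in tf.keep() inside the scope also survive it;
// async functions cannot be tidied (a returned Promise is an error).
```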
A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves =\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked =\n numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(\n `Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n\n /**\n * Internal helper method to execute a kernel Func\n *\n * Use `runKernel` to execute kernels from outside of engine.\n */\n private runKernelFunc(\n kernelParams: RegisteredKernelInvocation|\n CustomGradKernelInvocation): T {\n let outputs: Tensor[];\n let saved: Tensor[] = [];\n const isTapeOn = this.isTapeOn();\n\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n\n let kernelFunc: () => Tensor[];\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n\n let out: TensorInfo|TensorInfo[];\n\n const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?\n kernelParams.kernelName :\n this.state.activeScope != null ? this.state.activeScope.name : '';\n\n // Create the kernelFunc from either a registered kernel OR passed in\n // forward/backward functions (used by custom grad). In this context a\n // kernelFunc wraps a kernel implementation with some bookkeeping.\n\n if (isRegisteredKernelInvocation(kernelParams)) {\n const {kernelName, inputs, attrs} = kernelParams;\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n const kernel = getKernel(kernelName, this.backendName);\n util.assert(\n kernel != null,\n () => `Cannot find registered kernel '${kernelName}' for backend '${\n this.backendName}'`);\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({inputs, attrs, backend: this.backend});\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n\n const outTensors = outInfos.map((outInfo: TensorInfo|Tensor) => {\n // todo (yassogba) remove this option (Tensor) when node backend\n // methods have been modularized and they all return tensorInfo.\n // TensorInfos do not have a rank attribute.\n if ((outInfo as Tensor).rank != null) {\n return outInfo as Tensor;\n }\n const {dataId, shape, dtype} = outInfo as TensorInfo;\n return this.makeTensorFromDataId(dataId, shape, dtype);\n });\n\n // Save any required inputs and outputs.\n\n // Do not save unless we are recording to the tape. 
Otherwise it would\n // cause a mem leak since there would be no backprop for these tensors\n // (which would otherwise dispose them).\n if (isTapeOn) {\n const tensorsToSave =\n this.getTensorsForGradient(kernelName, inputs, outTensors);\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n } else {\n const {forwardFunc} = kernelParams;\n // Running a customGrad op.\n const saveFunc: GradSaveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]) as Tensor[];\n if (this.shouldCheckForMemLeaks()) {\n // Scope name is used to print a more helpful error message if needed.\n this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n\n //\n // Run the kernelFunc. Optionally profiling it.\n //\n const {inputs, attrs} = kernelParams;\n const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?\n null :\n kernelParams.backwardsFunc;\n\n let kernelProfile: KernelProfile;\n this.scopedRun(\n // Stop recording to a tape when running a kernel.\n () => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n } else {\n kernelProfile = this.profiler.profileKernel(\n kernelOrScopeName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n\n if (isTapeOn) {\n this.addTapeNode(\n kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelOrScopeName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(\n key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? outputs : outputs[0]) as T;\n }\n\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n private saveTensorsForBackwardMode(tensors: Tensor[]): Tensor[] {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n private getTensorsForGradient(\n kernelName: string, inputs: NamedTensorMap,\n outputs: Tensor[]): Tensor[]|null {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave: string[] = gradConfig.inputsToSave || [];\n const outputsToSave: boolean[] = gradConfig.outputsToSave || [];\n\n // If saveAllInputs is true, all inputs will be saved. 
Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave: Tensor[];\n if (gradConfig.saveAllInputs) {\n util.assert(\n Array.isArray(inputs),\n () => 'saveAllInputs is true, expected inputs to be an array.');\n\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n } else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n\n const outputTensorsToSave: Tensor[] =\n outputs.filter((_, i) => outputsToSave[i]);\n\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // We return an empty list rather than throw an error because the kernel we\n // are looking up may not actually be relevant to backproping through the\n // overall function\n //\n // See 'does not error if irrelevant (pruned) ops are missing grads' test\n // in gradients_test.ts for an example.\n return [];\n }\n\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values as BackendValues;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = (values as string[]).map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals as Uint8Array[]);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(\n dataId: DataId, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n return t;\n }\n\n makeVariable(\n initialValue: Tensor, trainable = true, name?: string,\n dtype?: DataType): Variable {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n\n trackTensor(a: Tensor, backend: KernelBackend): void {\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n // Bytes for complex numbers are counted by their components. 
Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.numBytes += bytes;\n\n if (!this.state.tensorInfo.has(a.dataId)) {\n this.state.numDataBuffers++;\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes\n });\n }\n\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n\n // Track the tensor by dataId and increase the refCount for the dataId in the\n // backend.\n // TODO(pyu10055): This is currently used by makeVariable method, to increase\n // refCount on the backend for the dataId. It can potentially be replaced with\n // Identity op indead of calling backend directly.\n incRef(a: Tensor, backend: KernelBackend): void {\n this.trackTensor(a, backend);\n this.backend.incRef(a.dataId);\n }\n\n removeDataId(dataId: DataId, backend: KernelBackend) {\n if (this.state.tensorInfo.has(dataId) &&\n this.state.tensorInfo.get(dataId).backend === backend) {\n this.state.tensorInfo.delete(dataId);\n this.state.numDataBuffers--;\n }\n }\n disposeTensor(a: Tensor): void {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n this.state.numBytes -= info.bytes;\n }\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n const bytes = a.size * util.bytesPerElement(a.dtype);\n this.state.numBytes -= bytes;\n }\n\n // Remove the reference to dataId if backend dispose the data successfully\n if (info.backend.disposeData(a.dataId)) {\n this.removeDataId(a.dataId, info.backend);\n }\n\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. 
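The counters maintained in `trackTensor`/`disposeTensor` are exactly what `tf.memory()` reports; a sketch:

```ts
const start = tf.memory();
const t = tf.tensor1d([1, 2, 3, 4]);                      // float32: 4 * 4 bytes
console.log(tf.memory().numBytes - start.numBytes);      // 16
t.dispose();
console.log(tf.memory().numTensors - start.numTensors);  // 0
```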
Creating a stack trace is too expensive\n // to do unconditionally.\n }\n\n disposeVariables(): void {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n\n disposeVariable(v: Variable): void {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n\n memory(): MemoryInfo {\n const info = this.backend.memory() as MemoryInfo;\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push(\n 'Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n\n async profile(query: () => (TensorContainer | Promise)):\n Promise {\n this.state.profiling = true;\n\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n\n this.state.profiling = false;\n\n this.state.activeProfile.peakBytes = Math.max(\n ...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n\n isTapeOn(): boolean {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n\n private addTapeNode(\n kernelName: string, inputs: NamedTensorMap, outputs: Tensor[],\n gradientsFunc: GradFunc, saved: Tensor[], attrs: NamedAttrMap): void {\n const tapeNode: TapeNode =\n {id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved};\n\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys: Tensor[]) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n\n keep(result: T): T {\n result.kept = true;\n return result;\n }\n\n private startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n\n private endTape() {\n this.state.gradientDepth--;\n }\n\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name?: string) {\n const scopeInfo: ScopeState = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n\n /**\n * End a scope. 
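`profile()` above surfaces the per-kernel records accumulated in `activeProfile`; a usage sketch:

```ts
const info = await tf.profile(() => tf.tensor1d([1, 2, 3]).square());
console.log(info.newBytes, info.newTensors, info.peakBytes);
console.log(info.kernels.map((k) => k.name));  // e.g. ['Square']
```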
Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result?: TensorContainer) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet =\n new Set(tensorsToTrackInParent.map(t => t.id));\n\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(\n f: () => T, xs: Tensor[], dy?: T,\n allowNoGradients = false): {value: T, grads: Tensor[]} {\n util.assert(\n xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n\n const y = this.scopedRun(\n () => this.startTape(), () => this.endTape(),\n () => this.tidy('forward', f));\n\n util.assert(\n y instanceof Tensor,\n () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error(\n 'Cannot compute gradient of y=f(x) with respect to x. Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n\n return this.tidy('backward', () => {\n const accumulatedGradientMap: {[tensorId: number]: Tensor} = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(\n accumulatedGradientMap, filteredTape,\n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f as ScopeFn),\n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return {value: y, grads};\n });\n }\n\n customGrad(f: CustomGradientFunc):\n (...args: Array) => T {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs: Tensor[]): T => {\n util.assert(\n inputs.every(t => t instanceof Tensor),\n () => 'The args passed in customGrad(f)(x1, x2,...) 
must all be ' +\n 'tensors');\n\n let res: {\n value: T,\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[],\n };\n const inputMap: NamedTensorMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n\n const forwardFunc: ForwardFunc = (_, save) => {\n res = f(...[...inputs, save]);\n util.assert(\n res.value instanceof Tensor,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(\n util.isFunction(res.gradFunc),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n };\n\n const backwardsFunc = (dy: T, saved: Tensor[]) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads: Tensor[] = Array.isArray(gradRes) ? gradRes : [gradRes];\n util.assert(\n grads.length === inputs.length,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(\n grads.every(t => t instanceof Tensor),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap: {[key: string]: () => Tensor} = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n };\n\n return this.runKernelFunc({\n forwardFunc,\n backwardsFunc,\n inputs: inputMap,\n });\n };\n }\n\n readSync(dataId: DataId): BackendValues {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId: DataId): Promise {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n\n async time(query: () => void): Promise {\n const start = now();\n const timingInfo = await this.backend.time(query) as TimingInfo;\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n private track(result: T): T {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n\n return result;\n }\n\n get registeredVariables(): NamedVariableMap {\n return this.state.registeredVariables;\n }\n\n /**\n * Resets the engine state. 
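`customGrad` pairs a forward function with a hand-written `gradFunc`; the standard usage pattern (shown loosely typed), consumed through `tf.grad`:

```ts
const square = tf.customGrad((x, save) => {
  save([x]);  // stash x for the backward pass
  return {
    value: x.square(),
    gradFunc: (dy, saved) => [dy.mul(saved[0].mul(2))],  // dy/dx = 2x
  };
});
tf.grad((x) => square(x))(tf.tensor1d([1, 2, 3])).print();  // [2, 4, 6]
```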
Removes all backends but does not remove\n * registered backend factories.\n */\n reset(): void {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\n\nfunction ones(shape: number[]): Tensor {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\n\nexport function getOrMakeEngine(): Engine {\n const ns = getGlobalNamespace() as {} as {_tfengine: Engine};\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\n\nexport const ENGINE = getOrMakeEngine();\n\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a: Tensor, b: Tensor): Tensor {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = {a, b};\n return ENGINE.runKernel(Add, inputs as {} as NamedTensorMap);\n}\n", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined(): boolean {\n return typeof navigator !== 'undefined' && navigator != null;\n}\n\nexport function isMobile(nav?: Navigator): boolean {\n if (nav || _isNavigatorDefined()) {\n if (!nav) {\n nav = navigator;\n }\n if (nav.product === 'ReactNative') {\n return true;\n }\n\n // tslint:disable-next-line:no-any\n const a = nav.userAgent || nav.vendor || (window as any).opera;\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s 
)|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\n\nexport function isBrowser(): boolean {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\n\nimport * as device_util from './device_util';\nimport {env} from './environment';\n\nconst ENV = env();\n\n/**\n * This file contains environment-related flag registrations.\n */\n\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn(\n 'Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag(\n 'IS_NODE',\n () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n\n/** Whether this browser is Chrome. 
*/\nENV.registerFlag(\n 'IS_CHROME',\n () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag(\n 'TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n\n/** Whether to check computation result for errors. */\nENV.registerFlag('CHECK_COMPUTATION_FOR_ERRORS', () => true);\n\n/** Whether the backend needs to wrap input to imageBitmap. */\nENV.registerFlag('WRAP_TO_IMAGEBITMAP', () => false);\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from './engine';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {DataType, TensorLike} from './types';\nimport {assert, flatten, inferDtype, isTypedArray, toTypedArray} from './util';\n\nexport function inferShape(val: TensorLike, dtype?: DataType): number[] {\n let firstElem: typeof val = val;\n\n if (isTypedArray(val)) {\n return dtype === 'string' ? 
[] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape: number[] = [];\n\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n\n return shape;\n}\n\nfunction deepAssertShapeConsistency(\n val: TensorLike, shape: number[], indices: number[]) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(\n shape.length === 0,\n () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(\n shape.length > 0,\n () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(\n val.length === shape[0],\n () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\n\nfunction assertDtype(\n expectedDtype: DataType|'numeric'|'string_or_numeric',\n actualDType: DataType, argName: string, functionName: string) {\n if (expectedDtype === 'string_or_numeric') {\n return;\n }\n if (expectedDtype == null) {\n throw new Error(`Expected dtype cannot be null.`);\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\n\nexport function convertToTensor<T extends Tensor>(\n x: T|TensorLike, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype as DataType;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 
'null' : (x as {}).constructor.name;\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x] as number[];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype as DataType) :\n flatten(x as string[], [], skipTypedArray) as string[];\n return ENGINE.makeTensor(values, inferredShape, inferredDtype) as T;\n}\n\nexport function convertToTensorArray<T extends Tensor>(\n arg: Array<T|TensorLike>, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T[] {\n if (!Array.isArray(arg)) {\n throw new Error(\n `Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg as T[];\n return tensors.map(\n (t, i) =>\n convertToTensor(t, `${argName}[${i}]`, functionName, parseAsDtype));\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {isPromise} from '../util';\n\nexport const OP_SCOPE_SUFFIX = '__op';\n\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op<T extends Function>(f: {[name: string]: T}): T {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(\n `Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n\n let opName = keys[0];\n const fn = f[opName];\n\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n\n // tslint:disable-next-line:no-any\n const f2 = (...args: any[]) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (isPromise(result)) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n } catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', {value: opName, configurable: true});\n\n // tslint:disable-next-line:no-any\n return f2 as any as T;\n}\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Complex, ComplexInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_<T extends Tensor>(real: T|TensorLike, imag: T|TensorLike): T {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch(\n $real.shape, $imag.shape,\n `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n\n const inputs: ComplexInputs = {real: $real, imag: $imag};\n return ENGINE.runKernel(Complex, inputs as {} as NamedTensorMap);\n}\n\nexport const complex = op({complex_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {TensorLike, TypedArray} from '../types';\nimport {DataType} from '../types';\nimport {assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray} from '../util';\n\n/** This is shared code across all tensor creation methods. 
*/\nexport function makeTensor(\n values: TensorLike, shape: number[], inferredShape: number[],\n dtype?: DataType): Tensor {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `Cannot construct a complex64 tensor directly. ` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error(\n 'values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(\n providedSize === inferredSize,\n () =>\n `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(\n inferredShape[i] === shape[i] || !flatDimsDontMatch,\n () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values] as number[];\n }\n\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values as string[], [], true) as string[];\n return ENGINE.makeTensor(values as TypedArray, shape, dtype);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {DataType, Rank, ShapeMap} from '../types';\n\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. 
If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor<R extends Rank>(\n values: TensorLike, shape?: ShapeMap[R], dtype?: DataType): Tensor<R> {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype) as Tensor<R>;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/* Type definitions for exporting and importing of models. */\n\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP: {[dtype: string]: number} = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n\n/**\n * A weight manifest.\n *\n * The weight manifest consists of an ordered list of weight-manifest groups.\n * Each weight-manifest group (\"group\" for short hereafter) consists of a\n * number of weight values stored in a number of paths.\n * See the documentation of `WeightsManifestGroupConfig` below for more details.\n */\nexport declare type WeightsManifestConfig = WeightsManifestGroupConfig[];\n\n/**\n * A weight-manifest group.\n *\n * Consists of an ordered list of weight values encoded in binary format,\n * stored in an ordered list of paths.\n */\nexport declare interface WeightsManifestGroupConfig {\n /**\n * An ordered list of paths.\n *\n * Paths are intentionally abstract in order to be general. 
For example, they\n * can be relative URL paths or relative paths on the file system.\n */\n paths: string[];\n\n /**\n * Specifications of the weights stored in the paths.\n */\n weights: WeightsManifestEntry[];\n}\n\n/**\n * Group to which the weight belongs.\n *\n * - 'optimizer': Weight from a stateful optimizer.\n */\nexport type WeightGroup = 'model'|'optimizer';\n\n/**\n * An entry in the weight manifest.\n *\n * The entry contains specification of a weight.\n */\nexport declare interface WeightsManifestEntry {\n /**\n * Name of the weight, e.g., 'Dense_1/bias'\n */\n name: string;\n\n /**\n * Shape of the weight.\n */\n shape: number[];\n\n /**\n * Data type of the weight.\n */\n dtype: 'float32'|'int32'|'bool'|'string'|'complex64';\n\n /**\n * Type of the weight.\n *\n * Optional.\n *\n * The value 'optimizer' indicates the weight belongs to an optimizer\n * (i.e., used only during model training and not during inference).\n */\n group?: WeightGroup;\n\n /**\n * Information for dequantization of the weight.\n */\n quantization?: {\n scale?: number, // The scaling constant to multiply by.\n min?: number, // The (possibly nudged) minimum weight to add.\n dtype: 'uint16'|'uint8'|'float16' // The dtype of the quantized weights.\n };\n}\n\n/**\n * Options for saving a model.\n * @innamespace io\n */\nexport interface SaveConfig {\n /**\n * Whether to save only the trainable weights of the model, ignoring the\n * non-trainable ones.\n */\n trainableOnly?: boolean;\n\n /**\n * Whether the optimizer will be saved (if exists).\n *\n * Default: `false`.\n */\n includeOptimizer?: boolean;\n}\n\n/**\n * Result of a saving operation.\n */\nexport interface SaveResult {\n /**\n * Information about the model artifacts saved.\n */\n modelArtifactsInfo: ModelArtifactsInfo;\n\n /**\n * HTTP responses from the server that handled the model-saving request (if\n * any). This is applicable only to server-based saving routes.\n */\n responses?: Response[];\n\n /**\n * Error messages and related data (if any).\n */\n errors?: Array<{}|string>;\n}\n\nexport declare interface ModelArtifactsInfo {\n /**\n * Timestamp for when the model is saved.\n */\n dateSaved: Date;\n\n /**\n * TODO (cais,yassogba) consider removing GraphDef as GraphDefs now\n * come in a JSON format and none of our IOHandlers support a non json\n * format. We could consider replacing this with 'Binary' if we want to\n * allow future handlers to save to non json formats (though they will\n * probably want more information than 'Binary').\n * Type of the model topology\n *\n * Possible values:\n * - JSON: JSON config (human-readable, e.g., Keras JSON).\n * - GraphDef: TensorFlow\n * [GraphDef](https://www.tensorflow.org/extend/tool_developers/#graphdef)\n * protocol buffer (binary).\n */\n modelTopologyType: 'JSON'|'GraphDef';\n\n /**\n * Size of model topology (Keras JSON or GraphDef), in bytes.\n */\n modelTopologyBytes?: number;\n\n /**\n * Size of weight specification or manifest, in bytes.\n */\n weightSpecsBytes?: number;\n\n /**\n * Size of weight value data, in bytes.\n */\n weightDataBytes?: number;\n}\n\n/** Model training configuration. */\nexport declare interface TrainingConfig {\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n // See\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tfjs-layers/blob/master/src/keras_format/training_config.ts\n /** Optimizer used for the model training. 
*/\n optimizer_config: {};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Loss function(s) for the model's output(s). */\n loss: string|string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Metric function(s) for the model's output(s). */\n metrics?: string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n weighted_metrics?: string[];\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n sample_weight_mode?: string;\n\n loss_weights?: number[]|{[key: string]: number};\n}\n\n/**\n * The serialized artifacts of a model, including topology and weights.\n *\n * The `modelTopology`, `trainingConfig`, `weightSpecs` and `weightData` fields\n * of this interface are optional, in order to support topology- or weights-only\n * saving and loading.\n *\n * Note this interface is used internally in IOHandlers. For the file format\n * written to disk as `model.json`, see `ModelJSON`.\n */\nexport declare interface ModelArtifacts {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology?: {}|ArrayBuffer;\n\n /**\n * Serialized configuration for the model's training.\n */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weight specifications.\n *\n * This corresponds to the weightsData below.\n */\n weightSpecs?: WeightsManifestEntry[];\n\n /**\n * Binary buffer for all weight values concatenated in the order specified\n * by `weightSpecs`.\n */\n weightData?: ArrayBuffer;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n}\n\n/**\n * The on-disk format of the `model.json` file.\n *\n * TF.js 1.0 always populates the optional fields when writing model.json.\n * Prior versions did not provide those fields.\n */\nexport declare interface ModelJSON {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology: {};\n\n /** Model training configuration. */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weights manifest.\n *\n * The weights manifest consists of an ordered list of weight-manifest\n * groups. Each weight-manifest group consists of a number of weight values\n * stored in a number of paths. 
See the documentation of\n * `WeightsManifestConfig` for more details.\n */\n weightsManifest: WeightsManifestConfig;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n}\n\n/**\n * Type definition for handlers of loading operations.\n */\nexport type LoadHandler = () => Promise<ModelArtifacts>;\n\n/**\n * Type definition for handlers of saving operations.\n */\nexport type SaveHandler = (modelArtifact: ModelArtifacts) =>\n Promise<SaveResult>;\n\n/**\n * Interface for a model import/export handler.\n *\n * The `save` and `load` handlers are both optional, in order to allow handlers\n * that support only saving or loading.\n */\n// tslint:disable-next-line:interface-name\nexport interface IOHandler {\n save?: SaveHandler;\n load?: LoadHandler;\n}\n\n/**\n * An interface for the manager of a model store.\n *\n * A model store is defined as a storage medium on which multiple models can\n * be stored. Each stored model has a unique `path` as its identifier.\n * A `ModelStoreManager` for the store allows actions including\n *\n * - Listing the models stored in the store.\n * - Deleting a model from the store.\n */\nexport interface ModelStoreManager {\n /**\n * List all models in the model store.\n *\n * @returns A dictionary mapping paths of existing models to their\n * model artifacts info. 
Model artifacts info include type of the model's\n * topology, byte sizes of the topology, weights, etc.\n */\n listModels(): Promise<{[path: string]: ModelArtifactsInfo}>;\n\n /**\n * Remove a model specified by `path`.\n *\n * @param path\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n */\n removeModel(path: string): Promise<ModelArtifactsInfo>;\n}\n\n/**\n * Callback for the progress of a long-running action such as an HTTP\n * request for a large binary object.\n *\n * `fraction` should be a number in the [0, 1] interval, indicating how\n * much of the action has completed.\n */\nexport type OnProgressCallback = (fraction: number) => void;\n\n/** @innamespace io */\nexport interface LoadOptions {\n /**\n * RequestInit (options) for HTTP requests.\n *\n * For detailed information on the supported fields, see\n * [https://developer.mozilla.org/en-US/docs/Web/API/Request/Request](\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request)\n */\n requestInit?: RequestInit;\n\n /**\n * Progress callback.\n */\n onProgress?: OnProgressCallback;\n\n /**\n * A function used to override the `window.fetch` function.\n */\n fetchFunc?: Function;\n\n /**\n * Strict loading model: whether extraneous weights or missing\n * weights should trigger an `Error`.\n *\n * If `true`, require that the provided weights exactly match those\n * required by the layers. `false` means that both extra weights\n * and missing weights will be silently ignored.\n *\n * Default: `true`.\n */\n strict?: boolean;\n\n /**\n * Path prefix for weight files, by default this is calculated from the\n * path of the model JSON file.\n *\n * For instance, if the path to the model JSON file is\n * `http://localhost/foo/model.json`, then the default path prefix will be\n * `http://localhost/foo/`. If a weight file has the path value\n * `group1-shard1of2` in the weight manifest, then the weight file will be\n * loaded from `http://localhost/foo/group1-shard1of2` by default. However,\n * if you provide a `weightPathPrefix` value of\n * `http://localhost/foo/alt-weights`, then the weight file will be loaded\n * from the path `http://localhost/foo/alt-weights/group1-shard1of2` instead.\n */\n weightPathPrefix?: string;\n\n /**\n * Whether the module or model is to be loaded from TF Hub.\n *\n * Setting this to `true` allows passing a TF-Hub module URL, omitting the\n * standard model file name and the query parameters.\n *\n * Default: `false`.\n */\n fromTFHub?: boolean;\n\n /**\n * An async function to convert weight file name to URL. The weight file\n * names are stored in model.json's weightsManifest.paths field. By default we\n * assume weight files are colocated with the model.json file. For example:\n * model.json URL: https://www.google.com/models/1/model.json\n * group1-shard1of1.bin url:\n * https://www.google.com/models/1/group1-shard1of1.bin\n *\n * With this function you can convert the weight file name to any URL.\n */\n weightUrlConverter?: (weightFileName: string) => Promise<string>;\n}\n\n/**\n * Additional options for Platform.fetch\n */\nexport interface RequestDetails {\n /**\n * Is this request for a binary file (as opposed to a json file)\n */\n isBinary?: boolean;\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {complex} from '../ops/complex';\n\nimport {tensor} from '../ops/tensor';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\nimport {TypedArray} from '../types';\nimport {sizeFromShape} from '../util';\n\nimport {DTYPE_VALUE_SIZE_MAP, ModelArtifacts, ModelArtifactsInfo, WeightGroup, WeightsManifestEntry} from './types';\n\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightsManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(\n tensors: NamedTensorMap|NamedTensor[], group?: WeightGroup):\n Promise<{data: ArrayBuffer, specs: WeightsManifestEntry[]}> {\n // TODO(adarob, cais): Support quantization.\n const specs: WeightsManifestEntry[] = [];\n const dataPromises: Array<Promise<TypedArray>> = [];\n\n const names: string[] = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec: WeightsManifestEntry = {name, shape: t.shape, dtype: t.dtype};\n if (t.dtype === 'string') {\n const utf8bytes = new Promise<Uint8Array>(async resolve => {\n const vals = await t.bytes() as Uint8Array[];\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength =\n new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n } else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n\n const tensorValues = await Promise.all(dataPromises);\n return {data: concatenateTypedArrays(tensorValues), specs};\n}\n\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose values are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(\n buffer: ArrayBuffer, specs: WeightsManifestEntry[]): NamedTensorMap {\n // TODO(adarob, cais): Support quantization.\n const out: NamedTensorMap = {};\n let float16Decode: (buffer: Uint16Array) => Float32Array | undefined;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values: TypedArray|string[]|Uint8Array[];\n\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(\n `Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n } else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(\n `Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n } else {\n throw new Error(\n `Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer =\n buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n } else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray as Uint16Array);\n } else {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n } else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n } else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(\n buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n (values as Uint8Array[]).push(bytes);\n offset += byteLength;\n }\n } else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n } else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n } else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n } else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n realTensor.dispose();\n imageTensor.dispose();\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs: TypedArray[]): ArrayBuffer {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n\n let totalByteLength = 0;\n\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. 
`normalizedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfies this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs: TypedArray[] = [];\n xs.forEach((x: TypedArray) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(\n x.byteLength === x.buffer.byteLength ? x :\n new (x.constructor as any)(x));\n if (!(x as any instanceof Float32Array || x as any instanceof Int32Array ||\n x as any instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x: TypedArray) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n\n return y.buffer;\n}\n\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str: string): number {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer: ArrayBuffer): string {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str: string): ArrayBuffer {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers: ArrayBuffer[]): ArrayBuffer {\n if (buffers.length === 1) {\n return buffers[0];\n }\n\n let totalByteLength = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n totalByteLength += buffer.byteLength;\n });\n\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path: string): string {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = 
path.split(SEPARATOR);\n return items[items.length - 1];\n}\n\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function getModelArtifactsInfoForJSON(modelArtifacts: ModelArtifacts):\n ModelArtifactsInfo {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n\n/**\n * Computes mantissa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable(): Uint32Array {\n const convertMantissa = (i: number): number => {\n let m = i << 13;\n let e = 0;\n\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n\n return m | e;\n };\n\n const mantisaTable = new Uint32Array(2048);\n\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n\n return mantisaTable;\n}\n\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable(): Uint32Array {\n const exponentTable = new Uint32Array(64);\n\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n\n return exponentTable;\n}\n\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 offset values.\n */\nfunction computeFloat16OffsetTable(): Uint32Array {\n const offsetTable = new Uint32Array(64);\n\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n\n return offsetTable;\n}\n\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder(): (buffer: Uint16Array) => Float32Array {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n\n return (quantizedArray: Uint16Array) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits =\n 
mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IOHandler, LoadOptions} from './types';\n\nexport type IORouter = (url: string|string[], loadOptions?: LoadOptions) =>\n IOHandler;\n\nexport class IORouterRegistry {\n // Singleton instance.\n private static instance: IORouterRegistry;\n\n private saveRouters: IORouter[];\n private loadRouters: IORouter[];\n\n private constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n\n private static getInstance(): IORouterRegistry {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter: IORouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter: IORouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. 
If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url: string|string[]): IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url: string|string[], loadOptions?: LoadOptions):\n IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n\n private static getHandlers(\n url: string|string[], handlerType: 'save'|'load',\n loadOptions?: LoadOptions): IOHandler[] {\n const validHandlers: IOHandler[] = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\n\nexport const registerSaveRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url: string|string[]) =>\n IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers =\n (url: string|string[], loadOptions?: LoadOptions) =>\n IORouterRegistry.getLoadHandlers(url, loadOptions);\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\n\nimport {getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelStoreManager, SaveResult} from './types';\n\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. 
The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase(): Promise<void> {\n const idbFactory = getIndexedDBFactory();\n\n return new Promise<void>((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\n\nfunction getIndexedDBFactory(): IDBFactory {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'Failed to obtain IndexedDB factory because the current environment ' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow: any = typeof window === 'undefined' ? self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error(\n 'The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\n\nfunction setUpDatabase(openRequest: IDBRequest) {\n const db = openRequest.result as IDBDatabase;\n db.createObjectStore(MODEL_STORE_NAME, {keyPath: 'modelPath'});\n db.createObjectStore(INFO_STORE_NAME, {keyPath: 'modelPath'});\n}\n\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB implements IOHandler {\n protected readonly indexedDB: IDBFactory;\n protected readonly modelPath: string;\n\n static readonly URL_SCHEME = 'indexeddb://';\n\n constructor(modelPath: string) {\n this.indexedDB = getIndexedDBFactory();\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise<SaveResult> {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserIndexedDB.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n return this.databaseAction(this.modelPath, modelArtifacts) as\n Promise<SaveResult>;\n }\n\n async load(): Promise<ModelArtifacts> {\n return this.databaseAction(this.modelPath) as Promise<ModelArtifacts>;\n }\n\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. 
If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n private databaseAction(modelPath: string, modelArtifacts?: ModelArtifacts):\n Promise<ModelArtifacts|SaveResult> {\n return new Promise<ModelArtifacts|SaveResult>((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n } else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n } else {\n // Put model into object store.\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest =\n infoStore.put({modelPath: this.modelPath, modelArtifactsInfo});\n let modelTx: IDBTransaction;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => resolve({modelArtifactsInfo});\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n\nexport const indexedDBRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 
'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel');\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath: string): IOHandler {\n return new BrowserIndexedDB(modelPath);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\n\nexport class BrowserIndexedDBManager implements ModelStoreManager {\n private indexedDB: IDBFactory;\n\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n return new Promise<{[path: string]: ModelArtifactsInfo}>(\n (resolve, reject) => {\n const openRequest =\n this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = (store as any).getAll() as IDBRequest;\n getAllInfoRequest.onsuccess = () => {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n\n async removeModel(path: string): Promise<ModelArtifactsInfo> {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n\n const getInfoRequest = infoStore.get(path);\n let modelTx: IDBTransaction;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n } else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () =>\n resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error =>\n reject(deleteModelRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess =
deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelStoreManager, SaveResult} from './types';\n\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts(): string[] {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error(\n 'purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths: string[] = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if (key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\n\nfunction getModelKeys(path: string): {\n info: string,\n topology: string,\n weightSpecs: string,\n weightData: string,\n modelMetadata: string\n} {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata:\n [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key: string) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new 
Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n\ndeclare type LocalStorageKeys = {\n info: string,\n topology: string,\n weightSpecs: string,\n weightData: string,\n modelMetadata: string\n};\n\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage implements IOHandler {\n protected readonly LS: Storage;\n protected readonly modelPath: string;\n protected readonly keys: LocalStorageKeys;\n\n static readonly URL_SCHEME = 'localstorage://';\n\n constructor(modelPath: string) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n\n try {\n this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(\n this.keys.weightData,\n arrayBufferToBase64String(modelArtifacts.weightData));\n const result: ModelArtifacts = {\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy\n };\n if (modelArtifacts.signature != null) {\n result.signature = modelArtifacts.signature;\n }\n if (modelArtifacts.userDefinedMetadata != null) {\n result.userDefinedMetadata = modelArtifacts.userDefinedMetadata;\n }\n if (modelArtifacts.modelInitializer != null) {\n result.modelInitializer = modelArtifacts.modelInitializer;\n }\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify(result));\n\n return {modelArtifactsInfo};\n } catch (err) {\n // If saving failed, clean up all items saved so far.\n this.LS.removeItem(this.keys.info);\n this.LS.removeItem(this.keys.topology);\n this.LS.removeItem(this.keys.weightSpecs);\n this.LS.removeItem(this.keys.weightData);\n this.LS.removeItem(this.keys.modelMetadata);\n\n throw new Error(\n `Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n 
`modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load(): Promise {\n const info =\n JSON.parse(this.LS.getItem(this.keys.info)) as ModelArtifactsInfo;\n if (info == null) {\n throw new Error(\n `In local storage, there is no model with name '${this.modelPath}'`);\n }\n\n if (info.modelTopologyType !== 'JSON') {\n throw new Error(\n 'BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n\n const out: ModelArtifacts = {};\n\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(\n `In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(\n `In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString) as ModelArtifacts;\n out.format = metadata['format'];\n out.generatedBy = metadata['generatedBy'];\n out.convertedBy = metadata['convertedBy'];\n if (metadata['signature'] != null) {\n out.signature = metadata['signature'];\n }\n if (metadata['userDefinedMetadata'] != null) {\n out.userDefinedMetadata = metadata['userDefinedMetadata'];\n }\n if (metadata['modelInitializer'] != null) {\n out.modelInitializer = metadata['modelInitializer'];\n }\n }\n\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(\n `In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n\n return out;\n }\n}\n\nexport const localStorageRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(\n url.slice(BrowserLocalStorage.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. 
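// A minimal round-trip sketch for the local-storage handler above, assuming
// the bundled tf namespace is importable as '@tensorflow/tfjs' (an assumption
// of this sketch, not stated by the source). 'localstorage://' URLs are
// resolved by the localStorageRouter registered above; note that weight data
// is stored base64-encoded, so it consumes roughly 4/3 of its binary size of
// the localStorage quota.
import * as tf from '@tensorflow/tfjs';

async function localStorageRoundTrip() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));
  // Save: routed to BrowserLocalStorage.save().
  await model.save('localstorage://demo/my-model');
  // Load: routed to BrowserLocalStorage.load() via the same URL scheme.
  const restored = await tf.loadLayersModel('localstorage://demo/my-model');
  restored.summary();
}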
For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath: string): IOHandler {\n return new BrowserLocalStorage(modelPath);\n}\n\nexport class BrowserLocalStorageManager implements ModelStoreManager {\n private readonly LS: Storage;\n\n constructor() {\n assert(\n env().getBool('IS_BROWSER'),\n () => 'Current environment is not a web browser');\n assert(\n typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined',\n () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key)) as ModelArtifactsInfo;\n }\n }\n return out;\n }\n\n async removeModel(path: string): Promise {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info)) as ModelArtifactsInfo;\n\n this.LS.removeItem(keys.info);\n this.LS.removeItem(keys.topology);\n this.LS.removeItem(keys.weightSpecs);\n this.LS.removeItem(keys.weightData);\n return info;\n }\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
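// A sketch of the key layout produced by the local-storage handler above.
// keysForModelPath is a hypothetical helper that mirrors getModelKeys(), using
// the PATH_PREFIX ('tensorflowjs_models') and suffix constants defined earlier.
function keysForModelPath(modelPath: string): string[] {
  const suffixes =
      ['info', 'model_topology', 'weight_specs', 'weight_data', 'model_metadata'];
  return suffixes.map(s => `tensorflowjs_models/${modelPath}/${s}`);
}
// keysForModelPath('demo/m1') yields:
//   'tensorflowjs_models/demo/m1/info'           <- listModels() keys off this
//   'tensorflowjs_models/demo/m1/model_topology'
//   'tensorflowjs_models/demo/m1/weight_specs'
//   'tensorflowjs_models/demo/m1/weight_data'
//   'tensorflowjs_models/demo/m1/model_metadata'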
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\n\nimport {assert} from '../util';\n\nimport {IORouterRegistry} from './router_registry';\nimport {ModelArtifactsInfo, ModelStoreManager} from './types';\n\nconst URL_SCHEME_SUFFIX = '://';\n\nexport class ModelStoreManagerRegistry {\n // Singleton instance.\n private static instance: ModelStoreManagerRegistry;\n\n private managers: {[scheme: string]: ModelStoreManager};\n\n private constructor() {\n this.managers = {};\n }\n\n private static getInstance(): ModelStoreManagerRegistry {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme: string, manager: ModelStoreManager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(\n registry.managers[scheme] == null,\n () => `A model store manager is already registered for scheme '${\n scheme}'.`);\n registry.managers[scheme] = manager;\n }\n\n static getManager(scheme: string): ModelStoreManager {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n\n static getSchemes(): string[] {\n return Object.keys(this.getInstance().managers);\n }\n}\n\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url: string): {scheme: string, path: string} {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(\n `The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\n\nasync function cloneModelInternal(\n sourceURL: string, destURL: string,\n deleteSource = false): Promise<ModelArtifactsInfo> {\n assert(\n sourceURL !== destURL,\n () => `Old path and new path are the same: '${sourceURL}'`);\n\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(\n loadHandlers.length > 0,\n () => `Copying failed because no load handler is found for source URL ${\n sourceURL}.`);\n assert(\n loadHandlers.length < 2,\n () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(\n saveHandlers.length > 0,\n () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(\n saveHandlers.length < 2,\n () => `Copying failed because more than one (${saveHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(destURL).scheme;\n\n const modelArtifacts = await loadHandler.load();\n\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n const saveResult = await saveHandler.save(modelArtifacts);\n\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n return saveResult.modelArtifactsInfo;\n}\n\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
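// A minimal sketch of the deletion ordering implemented by cloneModelInternal
// above (assumes the bundled tf.io namespace is available via the
// '@tensorflow/tfjs' import, an assumption of this sketch): a same-medium move
// removes the source right after loading, so the two copies never occupy one
// storage quota at the same time, while a cross-medium move removes the source
// only after the save has succeeded.
import * as tf from '@tensorflow/tfjs';

async function moveExamples() {
  // Same medium: source is deleted before the save.
  await tf.io.moveModel('localstorage://demo/a', 'localstorage://demo/b');
  // Cross medium: source is deleted only after the save succeeds.
  await tf.io.moveModel('localstorage://demo/b', 'indexeddb://demo/b');
}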
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels(): Promise<{[url: string]: ModelArtifactsInfo}> {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out: {[url: string]: ModelArtifactsInfo} = {};\n for (const scheme of schemes) {\n const schemeOut =\n await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n\n/**\n * Remove a model specified by URL from a registered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url: string): Promise<ModelArtifactsInfo> {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\nexport {moveModel, copyModel, removeModel, listModels};\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\nimport {BrowserIndexedDB, BrowserIndexedDBManager} from '../io/indexed_db';\nimport {BrowserLocalStorage, BrowserLocalStorageManager} from '../io/local_storage';\nimport {ModelStoreManagerRegistry} from '../io/model_management';\n\nimport {Platform} from './platform';\n\nexport class PlatformBrowser implements Platform {\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n private textEncoder: TextEncoder;\n\n fetch(path: string, init?: RequestInit): Promise {\n return fetch(path, init);\n }\n\n now(): number {\n return performance.now();\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\n\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n } catch (err) {\n }\n\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n } catch (err) {\n }\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. 
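// A sketch of the PlatformBrowser text-codec contract above: encode() accepts
// only 'utf-8'/'utf8' and lazily constructs a single TextEncoder, while
// decode() hands any encoding label straight to TextDecoder. Assumes
// PlatformBrowser is imported from the platform module above.
const browserPlatform = new PlatformBrowser();
const utf8Bytes = browserPlatform.encode('héllo', 'utf-8');   // Uint8Array of UTF-8 bytes
const roundTrip = browserPlatform.decode(utf8Bytes, 'utf-8'); // 'héllo'
// browserPlatform.encode('héllo', 'latin1') would throw: only UTF-8 is supported.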
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from '../environment';\n\nimport {Platform} from './platform';\n\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\n\ntype FetchFn = (url: string, init?: RequestInit) => Promise;\nlet systemFetch: FetchFn;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn: FetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch(): FetchFn {\n return systemFetch;\n}\n\nexport class PlatformNode implements Platform {\n private textEncoder: TextEncoder;\n // tslint:disable-next-line:no-any\n util: any;\n\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n\n fetch(path: string, requestInits?: RequestInit): Promise {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n\n now(): number {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\n\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorBuffer} from '../tensor';\nimport {DataType, DataTypeMap, Rank, ShapeMap} from '../types';\nimport * as util from '../util';\n\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. Fill the buffer using\n * `buffer.set()`, or by directly modifying `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer<R extends Rank, D extends DataType = 'float32'>(\n shape: ShapeMap[R], dtype: D = 'float32' as D,\n values?: DataTypeMap[D]): TensorBuffer<R, D> {\n dtype = dtype || 'float32' as D;\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. 
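// A complementary sketch for buffer() above with an explicit dtype (assumes
// the bundled tf namespace via '@tensorflow/tfjs', an assumption of this
// sketch): values live in a CPU-side TypedArray until toTensor() materializes
// an immutable tensor.
import * as tf from '@tensorflow/tfjs';

const intBuf = tf.buffer([2, 2], 'int32');
intBuf.set(3, 0, 0);  // row 0, col 0
intBuf.set(5, 1, 0);  // row 1, col 0
intBuf.toTensor().print();  // [[3, 0], [5, 0]], dtype int32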
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Cast, CastAttrs, CastInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {DataType, TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x: T|TensorLike, dtype: DataType): T {\n const $x = convertToTensor(x, 'x', 'cast');\n\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n\n const inputs: CastInputs = {x: $x};\n const attrs: CastAttrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const cast = op({cast_});\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Identity, IdentityInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'clone', 'string_or_numeric');\n const inputs: IdentityInputs = {x: $x};\n\n // Note this op is called tf.identity in python. 
Hence the kernel name used\n // here.\n return ENGINE.runKernel(Identity, inputs as {} as NamedTensorMap);\n}\n\nexport const clone = op({clone_});\n", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\n\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x: T, verbose = false): void {\n console.log(x.toString(verbose));\n}\n", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Required side effectful code for tfjs-core\n\n// Set up Engine and ENV\nimport {getOrMakeEngine} from './engine';\ngetOrMakeEngine();\n\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n\n// Set up OpHandler\nimport {buffer} from './ops/buffer';\nimport {cast} from './ops/cast';\nimport {clone} from './ops/clone';\nimport {print} from './ops/print';\nimport {OpHandler, setOpHandler} from './tensor';\nconst opHandler: OpHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
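// A sketch of what the setOpHandler() wiring above enables (assuming the
// bundled tf namespace via '@tensorflow/tfjs'): the chainable Tensor
// convenience methods delegate to the four registered ops, which lets
// tensor.ts avoid importing the op implementations directly.
import * as tf from '@tensorflow/tfjs';

const chained = tf.tensor1d([1.5, 2.5, 3]);
chained.print();                     // delegates to opHandler.print
const asInt = chained.cast('int32'); // delegates to opHandler.cast
const copy = asInt.clone();          // delegates to opHandler.clone
copy.print();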
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Importing local_storage and indexed_db is necessary for the routers to be\n// registered.\nimport './indexed_db';\nimport './local_storage';\n\nimport {browserFiles} from './browser_files';\nimport {browserHTTPRequest, http, isHTTPScheme} from './http';\nimport {concatenateArrayBuffers, decodeWeights, encodeWeights, getModelArtifactsInfoForJSON} from './io_utils';\nimport {fromMemory, withSaveHandler} from './passthrough';\nimport {getLoadHandlers, getSaveHandlers, registerLoadRouter, registerSaveRouter} from './router_registry';\nimport {IOHandler, LoadHandler, LoadOptions, ModelArtifacts, ModelArtifactsInfo, ModelJSON, ModelStoreManager, OnProgressCallback, RequestDetails, SaveConfig, SaveHandler, SaveResult, WeightGroup, WeightsManifestConfig, WeightsManifestEntry} from './types';\nimport {loadWeights, weightsLoaderFactory} from './weights_loader';\n\nexport {copyModel, listModels, moveModel, removeModel} from './model_management';\nexport {\n browserFiles,\n browserHTTPRequest,\n concatenateArrayBuffers,\n decodeWeights,\n encodeWeights,\n fromMemory,\n getLoadHandlers,\n getModelArtifactsInfoForJSON,\n getSaveHandlers,\n http,\n IOHandler,\n isHTTPScheme,\n LoadHandler,\n LoadOptions,\n loadWeights,\n ModelArtifacts,\n ModelArtifactsInfo,\n ModelJSON,\n ModelStoreManager,\n OnProgressCallback,\n registerLoadRouter,\n registerSaveRouter,\n RequestDetails,\n SaveConfig,\n SaveHandler,\n SaveResult,\n WeightGroup,\n weightsLoaderFactory,\n WeightsManifestConfig,\n WeightsManifestEntry,\n withSaveHandler\n};\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
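// A round-trip sketch for the file-based IOHandlers exported above (assumes
// the bundled tf namespace via '@tensorflow/tfjs' and two file-input elements
// with the hypothetical ids 'upload-json' and 'upload-weights'): saving
// through 'downloads://' produces 'mymodel.json' plus 'mymodel.weights.bin',
// which browserFiles() can load back, JSON file first.
import * as tf from '@tensorflow/tfjs';

async function downloadThenReupload() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));
  await model.save('downloads://mymodel');  // triggers two browser downloads
  // Later, after the user re-selects the two downloaded files:
  const jsonInput = document.getElementById('upload-json') as HTMLInputElement;
  const weightsInput = document.getElementById('upload-weights') as HTMLInputElement;
  const restored = await tf.loadLayersModel(
      tf.io.browserFiles([jsonInput.files[0], weightsInput.files[0]]));
  restored.summary();
}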
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {basename, concatenateArrayBuffers, getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelJSON, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\n\nfunction defer(f: () => T): Promise {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\n\nexport class BrowserDownloads implements IOHandler {\n private readonly modelTopologyFileName: string;\n private readonly weightDataFileName: string;\n private readonly jsonAnchor: HTMLAnchorElement;\n private readonly weightDataAnchor: HTMLAnchorElement;\n\n static readonly URL_SCHEME = 'downloads://';\n\n constructor(fileNamePrefix?: string) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error(\n 'browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n\n this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (typeof (document) === 'undefined') {\n throw new Error(\n 'Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const weightsURL = window.URL.createObjectURL(new Blob(\n [modelArtifacts.weightData], {type: 'application/octet-stream'}));\n\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelTopologyAndWeightManifest: ModelJSON = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n weightsManifest\n };\n if (modelArtifacts.signature != null) {\n modelTopologyAndWeightManifest.signature = modelArtifacts.signature;\n }\n if (modelArtifacts.userDefinedMetadata != null) {\n 
modelTopologyAndWeightManifest.userDefinedMetadata =\n modelArtifacts.userDefinedMetadata;\n }\n if (modelArtifacts.modelInitializer != null) {\n modelTopologyAndWeightManifest.modelInitializer =\n modelArtifacts.modelInitializer;\n }\n const modelTopologyAndWeightManifestURL =\n window.URL.createObjectURL(new Blob(\n [JSON.stringify(modelTopologyAndWeightManifest)],\n {type: 'application/json'}));\n\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :\n this.jsonAnchor;\n jsonAnchor.download = this.modelTopologyFileName;\n jsonAnchor.href = modelTopologyAndWeightManifestURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(\n () => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n\n return {modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts)};\n }\n }\n}\n\nclass BrowserFiles implements IOHandler {\n private readonly files: File[];\n\n constructor(files: File[]) {\n if (files == null || files.length < 1) {\n throw new Error(\n `When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.files = files;\n }\n\n async load(): Promise {\n const jsonFile = this.files[0];\n const weightFiles = this.files.slice(1);\n\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse((event.target as any).result) as ModelJSON;\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(\n `modelTopology field is missing from file ${jsonFile.name}`));\n return;\n }\n\n if (weightFiles.length === 0) {\n resolve({modelTopology});\n }\n\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(\n `weightManifest field is missing from file ${jsonFile.name}`));\n return;\n }\n\n let pathToFile: {[path: string]: File};\n try {\n pathToFile =\n this.checkManifestAndWeightFiles(weightsManifest, weightFiles);\n } catch (err) {\n reject(err);\n return;\n }\n\n const weightSpecs: WeightsManifestEntry[] = [];\n const paths: string[] = [];\n const perFileBuffers: ArrayBuffer[] = [];\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n paths.push(path);\n perFileBuffers.push(null);\n });\n weightSpecs.push(...weightsGroup.weights);\n });\n\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const weightData = (event.target as any).result as ArrayBuffer;\n const index = paths.indexOf(path);\n perFileBuffers[index] = weightData;\n if (perFileBuffers.indexOf(null) === -1) {\n const result: ModelArtifacts = {\n modelTopology,\n weightSpecs,\n weightData: concatenateArrayBuffers(perFileBuffers),\n format: modelJSON.format,\n 
generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy\n };\n if (modelJSON.signature != null) {\n result.signature = modelJSON.signature;\n }\n if (modelJSON.userDefinedMetadata != null) {\n result.userDefinedMetadata = modelJSON.userDefinedMetadata;\n }\n if (modelJSON.modelInitializer != null) {\n result.modelInitializer = modelJSON.modelInitializer;\n }\n resolve(result);\n }\n };\n weightFileReader.onerror = error =>\n reject(`Failed to read weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(pathToFile[path]);\n });\n });\n };\n jsonReader.onerror = error => reject(\n `Failed to read model topology and weights manifest JSON ` +\n `from file '${jsonFile.name}'. BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(jsonFile);\n });\n }\n\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n private checkManifestAndWeightFiles(\n manifest: WeightsManifestConfig, files: File[]): {[path: string]: File} {\n const basenames: string[] = [];\n const fileNames = files.map(file => basename(file.name));\n const pathToFile: {[path: string]: File} = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(\n `Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(\n `Weight file with basename '${pathBasename}' is not provided.`);\n } else {\n pathToFile[path] = files[fileNames.indexOf(pathBasename)];\n }\n });\n }\n\n if (basenames.length !== files.length) {\n throw new Error(\n `Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${files.length}).`);\n }\n return pathToFile;\n }\n}\n\nexport const browserDownloadsRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used with model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. `null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. 
A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model'): IOHandler {\n return new BrowserDownloads(fileNamePrefix);\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (`<input type=\"file\">`)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, one or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files: File[]): IOHandler {\n return new BrowserFiles(files);\n}\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {assert} from '../util';\n\nimport {OnProgressCallback} from './types';\n\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolves.\n * @param startFraction Optional fraction start. 
Defaults to 0.\n * @param endFraction Optional fraction end. Defaults to 1.\n */\nexport function monitorPromisesProgress(\n  promises: Array<Promise<{}>>, onProgress: OnProgressCallback,\n  startFraction?: number, endFraction?: number) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n\n const registerMonitor = (promise: Promise<{}>) => {\n  promise.then(value => {\n   const fraction = startFraction +\n     ++resolvedPromise / promises.length * (endFraction - startFraction);\n   // pass fraction as parameter to callback function.\n   onProgress(fraction);\n   return value;\n  });\n  return promise;\n };\n\n function checkPromises(promises: Array<Promise<{}>>): void {\n  assert(\n    promises != null && Array.isArray(promises) && promises.length > 0,\n    () => 'promises must be a non-empty array');\n }\n\n function checkFraction(startFraction: number, endFraction: number): void {\n  assert(\n    startFraction >= 0 && startFraction <= 1,\n    () => `Progress fraction must be in range [0, 1], but ` +\n      `got startFraction ${startFraction}`);\n  assert(\n    endFraction >= 0 && endFraction <= 1,\n    () => `Progress fraction must be in range [0, 1], but ` +\n      `got endFraction ${endFraction}`);\n  assert(\n    endFraction >= startFraction,\n    () => `startFraction must be no more than endFraction, but ` +\n      `got startFraction ${startFraction} and endFraction ` +\n      `${endFraction}`);\n }\n\n return Promise.all(promises.map(registerMonitor));\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '../environment';\n\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\nimport {decodeWeights} from './io_utils';\nimport {monitorPromisesProgress} from './progress';\nimport {DTYPE_VALUE_SIZE_MAP, LoadOptions, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n *   before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n *   length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(\n  fetchURLs: string[], loadOptions?: LoadOptions): Promise<ArrayBuffer[]> {\n if (loadOptions == null) {\n  loadOptions = {};\n }\n\n const fetchFunc = loadOptions.fetchFunc == null ? 
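`monitorPromisesProgress` is an internal helper rather than part of the public `tf.io` surface, so a usage sketch has to import it from the module directly (import path assumed). With the defaults of `startFraction = 0` and `endFraction = 1`, four promises report 0.25, 0.5, 0.75 and 1.0:

```js
import {monitorPromisesProgress} from './progress';

// fraction = startFraction + resolved / total * (endFraction - startFraction),
// so each resolution advances the callback by 1/4 here (URLs are hypothetical).
const requests = ['/w1.bin', '/w2.bin', '/w3.bin', '/w4.bin'].map(u => fetch(u));
const responses = await monitorPromisesProgress(
    requests, fraction => console.log(`progress: ${fraction}`));
```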
env().platform.fetch :\n loadOptions.fetchFunc;\n\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(\n fetchURL =>\n fetchFunc(fetchURL, loadOptions.requestInit, {isBinary: true}));\n\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(\n requests, loadOptions.onProgress, fetchStartFraction,\n fetchEndFraction);\n\n const bufferPromises = responses.map(response => response.arrayBuffer());\n\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(\n bufferPromises, loadOptions.onProgress, bufferStartFraction,\n bufferEndFraction);\n return buffers;\n}\n\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[],\n requestInit?: RequestInit): Promise {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n\n const fetchWeights = (fetchUrls: string[]) =>\n loadWeightsAsArrayBuffer(fetchUrls, {requestInit});\n const loadWeights = weightsLoaderFactory(fetchWeights);\n\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(\n fetchWeightsFunction: (fetchUrls: string[]) => Promise):\n (manifest: WeightsManifestConfig, filePathPrefix?: string,\n weightNames?: string[]) => Promise {\n return async(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[]): Promise => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch: {\n [group: number]: Array<{\n manifestEntry: WeightsManifestEntry; groupOffset: number;\n sizeBytes: number;\n }>\n } = {};\n const weightsFound =\n weightNames != null ? 
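`tf.io.loadWeights` is the public entry point over this machinery. Per the TODO above, groups are fetched atomically, so requesting a subset of weights still downloads every shard of each group that contains one of them. A sketch with hypothetical URLs and weight names:

```js
const manifestUrl = 'https://example.com/model/weights_manifest.json';
const manifest = await (await fetch(manifestUrl)).json();

// Only the groups containing these two tensors are fetched and decoded.
const weightMap = await tf.io.loadWeights(
    manifest, 'https://example.com/model', ['dense/kernel', 'dense/bias']);
weightMap['dense/kernel'].print();
```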
weightNames.map(() => false) : [];\n const allManifestWeightNames: string[] = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n } else {\n enqueueWeightsForFetchingFn();\n }\n\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(\n `Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. \\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch =\n groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n\n const fetchUrls: string[] = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n\n const weightsTensorMap: NamedTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(\n weightsEntry.groupOffset,\n weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap =\n decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n\n bufferIndexOffset += numBuffers;\n });\n\n return weightsTensorMap;\n };\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. 
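Because `weightsLoaderFactory` only needs a `fetchUrls => ArrayBuffer[]` function, custom policies such as shard caching slot in naturally. A sketch reusing the hypothetical `manifest` from the previous example; caching the promise (not the buffer) also de-duplicates in-flight downloads:

```js
const shardCache = new Map();
const cachingFetch = (urls) => Promise.all(urls.map(url => {
  if (!shardCache.has(url)) {
    shardCache.set(url, fetch(url).then(res => res.arrayBuffer()));
  }
  return shardCache.get(url);
}));

const loadCachedWeights = tf.io.weightsLoaderFactory(cachingFetch);
const weightMap = await loadCachedWeights(manifest, 'https://example.com/model/');
```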
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\n\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {concatenateArrayBuffers, getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, LoadOptions, ModelArtifacts, ModelJSON, OnProgressCallback, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\nimport {loadWeightsAsArrayBuffer} from './weights_loader';\n\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest implements IOHandler {\n protected readonly path: string;\n protected readonly requestInit: RequestInit;\n\n private readonly fetch: Function;\n private readonly weightUrlConverter: (weightName: string) => Promise;\n\n readonly DEFAULT_METHOD = 'POST';\n\n static readonly URL_SCHEME_REGEX = /^https?:\\/\\//;\n\n private readonly weightPathPrefix: string;\n private readonly onProgress: OnProgressCallback;\n\n constructor(path: string, loadOptions?: LoadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n\n if (loadOptions.fetchFunc != null) {\n assert(\n typeof loadOptions.fetchFunc === 'function',\n () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n } else {\n this.fetch = env().platform.fetch;\n }\n\n assert(\n path != null && path.length > 0,\n () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n\n if (Array.isArray(path)) {\n assert(\n path.length === 2,\n () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error(\n 'requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n const init = Object.assign({method: this.DEFAULT_METHOD}, this.requestInit);\n init.body = new FormData();\n\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest: ModelJSON = {\n modelTopology: modelArtifacts.modelTopology,\n 
format: modelArtifacts.format,\n   generatedBy: modelArtifacts.generatedBy,\n   convertedBy: modelArtifacts.convertedBy,\n   weightsManifest\n  };\n  if (modelArtifacts.signature != null) {\n   modelTopologyAndWeightManifest.signature = modelArtifacts.signature;\n  }\n  if (modelArtifacts.userDefinedMetadata != null) {\n   modelTopologyAndWeightManifest.userDefinedMetadata =\n     modelArtifacts.userDefinedMetadata;\n  }\n  if (modelArtifacts.modelInitializer != null) {\n   modelTopologyAndWeightManifest.modelInitializer =\n     modelArtifacts.modelInitializer;\n  }\n\n  init.body.append(\n    'model.json',\n    new Blob(\n      [JSON.stringify(modelTopologyAndWeightManifest)],\n      {type: JSON_TYPE}),\n    'model.json');\n\n  if (modelArtifacts.weightData != null) {\n   init.body.append(\n     'model.weights.bin',\n     new Blob([modelArtifacts.weightData], {type: OCTET_STREAM_MIME_TYPE}),\n     'model.weights.bin');\n  }\n\n  const response = await this.fetch(this.path, init);\n\n  if (response.ok) {\n   return {\n    modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts),\n    responses: [response],\n   };\n  } else {\n   throw new Error(\n     `BrowserHTTPRequest.save() failed due to HTTP response status ` +\n     `${response.status}.`);\n  }\n }\n\n /**\n  * Load model artifacts via HTTP request(s).\n  *\n  * See the documentation of `tf.io.http` for details on the saved\n  * artifacts.\n  *\n  * @returns The loaded model artifacts (if loading succeeds).\n  */\n async load(): Promise<ModelArtifacts> {\n  const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n\n  if (!modelConfigRequest.ok) {\n   throw new Error(\n     `Request to ${this.path} failed with status code ` +\n     `${modelConfigRequest.status}. Please verify this URL points to ` +\n     `the model JSON of the model to load.`);\n  }\n  let modelConfig: ModelJSON;\n  try {\n   modelConfig = await modelConfigRequest.json();\n  } catch (e) {\n   let message = `Failed to parse model JSON of response from ${this.path}.`;\n   // TODO(nsthorat): Remove this after some time when we're comfortable that\n   // .pb files are mostly gone.\n   if (this.path.endsWith('.pb')) {\n    message += ' Your path contains a .pb file extension. ' +\n      'Support for .pb models has been removed in TensorFlow.js 1.0 ' +\n      'in favor of .json models. 
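Combined with `loadWeightsAsArrayBuffer` above, the `onProgress` callback runs from 0 to 0.5 while the weight shards are being fetched and from 0.5 to 1 while their bodies are read. A sketch with a hypothetical model URL:

```js
const model = await tf.loadLayersModel('https://example.com/model.json', {
  onProgress: fraction => console.log(`load: ${(fraction * 100).toFixed(0)}%`),
});
```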
You can re-convert your Python ' +\n      'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n      'or you can convert your .pb models with the \\'pb2json\\' ' +\n      'NPM script in the tensorflow/tfjs-converter repository.';\n   } else {\n    message += ' Please make sure the server is serving valid ' +\n      'JSON for this request.';\n   }\n   throw new Error(message);\n  }\n  const modelTopology = modelConfig.modelTopology;\n  const weightsManifest = modelConfig.weightsManifest;\n  const generatedBy = modelConfig.generatedBy;\n  const convertedBy = modelConfig.convertedBy;\n  const format = modelConfig.format;\n  const signature = modelConfig.signature;\n  const userDefinedMetadata = modelConfig.userDefinedMetadata;\n\n  // We do not allow both modelTopology and weightsManifest to be missing.\n  if (modelTopology == null && weightsManifest == null) {\n   throw new Error(\n     `The JSON from HTTP path ${this.path} contains neither model ` +\n     `topology nor manifest for weights.`);\n  }\n\n  let weightSpecs: WeightsManifestEntry[];\n  let weightData: ArrayBuffer;\n  if (weightsManifest != null) {\n   const results = await this.loadWeights(weightsManifest);\n   [weightSpecs, weightData] = results;\n  }\n\n  const artifacts: ModelArtifacts = {\n   modelTopology,\n   weightSpecs,\n   weightData,\n   generatedBy,\n   convertedBy,\n   format\n  };\n\n  if (signature != null) {\n   artifacts.signature = signature;\n  }\n  if (userDefinedMetadata != null) {\n   artifacts.userDefinedMetadata = userDefinedMetadata;\n  }\n\n  const initializer = modelConfig.modelInitializer;\n  if (initializer) {\n   artifacts.modelInitializer = initializer;\n  }\n\n  return artifacts;\n }\n\n private async loadWeights(weightsManifest: WeightsManifestConfig):\n   Promise<[WeightsManifestEntry[], ArrayBuffer]> {\n  const weightPath = Array.isArray(this.path) ? this.path[1] : this.path;\n  const [prefix, suffix] = parseUrl(weightPath);\n  const pathPrefix = this.weightPathPrefix || prefix;\n\n  const weightSpecs = [];\n  for (const entry of weightsManifest) {\n   weightSpecs.push(...entry.weights);\n  }\n\n  const fetchURLs: string[] = [];\n  const urlPromises: Array<Promise<string>> = [];\n  for (const weightsGroup of weightsManifest) {\n   for (const path of weightsGroup.paths) {\n    if (this.weightUrlConverter != null) {\n     urlPromises.push(this.weightUrlConverter(path));\n    } else {\n     fetchURLs.push(pathPrefix + path + suffix);\n    }\n   }\n  }\n\n  if (this.weightUrlConverter) {\n   fetchURLs.push(...await Promise.all(urlPromises));\n  }\n\n  const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n   requestInit: this.requestInit,\n   fetchFunc: this.fetch,\n   onProgress: this.onProgress\n  });\n  return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\n\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url: string): [string, string] {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix =\n   lastSearchParam > lastSlash ? 
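The `weightUrlConverter` branch above lets each manifest path be resolved asynchronously, for example to a pre-signed URL, bypassing the `pathPrefix + path + suffix` construction for those paths. A sketch against a hypothetical signing endpoint:

```js
const model = await tf.loadLayersModel('https://example.com/model.json', {
  weightUrlConverter: async (weightFileName) => {
    const res = await fetch(`https://example.com/sign?file=${weightFileName}`);
    return (await res.json()).url;  // response shape is assumed
  },
});
```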
url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\n\nexport function isHTTPScheme(url: string): boolean {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\n\nexport const httpRouter: IORouter =\n  (url: string, loadOptions?: LoadOptions) => {\n   if (typeof fetch === 'undefined' &&\n     (loadOptions == null || loadOptions.fetchFunc == null)) {\n    // `http` uses `fetch` or `node-fetch`; if one wants to use it in\n    // an environment that is not the browser or node, they have to set up a\n    // global fetch polyfill.\n    return null;\n   } else {\n    let isHTTP = true;\n    if (Array.isArray(url)) {\n     isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n    } else {\n     isHTTP = isHTTPScheme(url);\n    }\n    if (isHTTP) {\n     return http(url, loadOptions);\n    }\n   }\n   return null;\n  };\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n\n/**\n * Creates an IOHandler subtype that sends model artifacts to an HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n *   - A JSON file consisting of `modelTopology` and `weightsManifest`.\n *   - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n *     tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n *     'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the request,\n * this particular server reconstitutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n *   Can be an absolute HTTP path (e.g.,\n *   'http://localhost:8000/model-upload') or a relative path (e.g.,\n *   './model-upload').\n * @param requestInit Request configurations to be used when sending\n *    HTTP request to server using `fetch`. It can contain fields such as\n *    `method`, `credentials`, `headers`, `mode`, etc. See\n *    https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n *    for more information. `requestInit` must not have a body, because the\n *    body will be set by TensorFlow.js. File blobs representing the model\n *    topology (filename: 'model.json') and the weights of the model (filename:\n *    'model.weights.bin') will be appended to the body. If `requestInit` has a\n *    `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. 
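As the router comment notes, plain `http://` URLs are only routed when a `fetch` implementation is available. In a Node.js environment without a global `fetch`, one can be supplied explicitly; a sketch using node-fetch as one option:

```js
const fetch = require('node-fetch');
const model = await tf.loadLayersModel(
    tf.io.http('https://example.com/model.json', {fetchFunc: fetch}));
```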
It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path: string, loadOptions?: LoadOptions): IOHandler {\n return new HTTPRequest(path, loadOptions);\n}\n\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(\n path: string, loadOptions?: LoadOptions): IOHandler {\n return http(path, loadOptions);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers that pass through the in-memory ModelArtifacts format.\n */\n\nimport {IOHandler, ModelArtifacts, SaveResult, TrainingConfig, WeightsManifestEntry} from './types';\n\nclass PassthroughLoader implements IOHandler {\n constructor(private readonly modelArtifacts?: ModelArtifacts) {}\n\n async load(): Promise {\n return this.modelArtifacts;\n }\n}\n\nclass PassthroughSaver implements IOHandler {\n constructor(\n private readonly saveHandler:\n (artifacts: ModelArtifacts) => Promise) {}\n\n async save(modelArtifacts: ModelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. 
Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(\n modelArtifacts: {}|ModelArtifacts, weightSpecs?: WeightsManifestEntry[],\n weightData?: ArrayBuffer, trainingConfig?: TrainingConfig): IOHandler {\n if (arguments.length === 1) {\n const isModelArtifacts =\n (modelArtifacts as ModelArtifacts).modelTopology != null ||\n (modelArtifacts as ModelArtifacts).weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts as ModelArtifacts);\n } else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({modelTopology: modelArtifacts as {}});\n }\n } else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts as {},\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(\n saveHandler: (artifacts: ModelArtifacts) =>\n Promise): IOHandler {\n return new PassthroughSaver(saveHandler);\n}\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Exports under the tf.math.* namespace.\n */\n\nimport {confusionMatrix} from './ops/confusion_matrix';\n\nexport {confusionMatrix};\n", "/**\n * @license\n * Copyright 2020 Google LLC. 
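`withSaveHandler` and `fromMemory` compose into an in-memory round trip, which is handy for tests or for custom persistence layers. A minimal sketch:

```js
const model = tf.sequential(
    {layers: [tf.layers.dense({units: 1, inputShape: [4]})]});

// Capture the artifacts on save...
let captured;
await model.save(tf.io.withSaveHandler(async (artifacts) => {
  captured = artifacts;
  return {modelArtifactsInfo: {dateSaved: new Date(), modelTopologyType: 'JSON'}};
}));

// ...and rebuild an equivalent model from them.
const clone = await tf.loadLayersModel(tf.io.fromMemory(captured));
```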
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(\n a: Tensor|TensorLike, b: Tensor|TensorLike, transposeA = false,\n transposeB = false): T {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: BatchMatMulInputs = {a: $a, b: $b};\n const attrs: BatchMatMulAttrs = {transposeA, transposeB};\n\n return ENGINE.runKernel(\n BatchMatMul, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const matMul = op({matMul_});\n", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {OneHot, OneHotAttrs, OneHotInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). 
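The `transposeA`/`transposeB` flags transpose an operand before the multiplication, which saves a separate `tf.transpose` call when the inner dimensions do not already agree:

```js
const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);
const b = tf.tensor2d([10, 20, 30, 40, 50, 60], [2, 3]);

// Both operands are [2, 3]; transposing b yields [3, 2], so the product is [2, 2].
tf.matMul(a, b, false, true).print();
```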
If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(\n indices: Tensor|TensorLike, depth: number, onValue = 1,\n offValue = 0): Tensor {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n\n const inputs: OneHotInputs = {indices: $indices};\n const attrs: OneHotAttrs = {depth, onValue, offValue};\n\n return ENGINE.runKernel(\n OneHot, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const oneHot = op({oneHot_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Transpose, TransposeAttrs, TransposeInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. 
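A short illustration of the custom fill values; with rank-1 indices and `depth = 3`, the output is a `[2, 3]` tensor:

```js
tf.oneHot(tf.tensor1d([0, 2], 'int32'), 3, 5, -1).print();
// [[5, -1, -1],
//  [-1, -1, 5]]
```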
Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x: T|TensorLike, perm?: number[]): T {\n const $x = convertToTensor(x, 'x', 'transpose');\n\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert(\n $x.rank === perm.length,\n () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(\n axis >= 0 && axis < $x.rank,\n () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n\n if ($x.rank <= 1) {\n return $x.clone();\n }\n\n const inputs: TransposeInputs = {x: $x};\n const attrs: TransposeAttrs = {perm};\n\n return ENGINE.runKernel(\n Transpose, inputs as {} as NamedTensorMap, attrs as {} as NamedAttrMap);\n}\n\nexport const transpose = op({transpose_});\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport {matMul} from './mat_mul';\nimport {oneHot} from './one_hot';\nimport {op} from './operation';\nimport {transpose} from './transpose';\n\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. 
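Output dimension `i` takes its size from input dimension `perm[i]`, so a rank-3 tensor can have its axes rotated in a single call:

```js
const x = tf.zeros([2, 3, 4]);
console.log(tf.transpose(x, [2, 0, 1]).shape);  // [4, 2, 3]
```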
The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(\n labels: Tensor1D|TensorLike, predictions: Tensor1D|TensorLike,\n numClasses: number): Tensor2D {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'confusionMatrix');\n\n util.assert(\n numClasses == null || numClasses > 0 && Number.isInteger(numClasses),\n () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert(\n $labels.rank === 1,\n () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert(\n $predictions.rank === 1,\n () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert(\n $labels.shape[0] === $predictions.shape[0],\n () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(\n numClasses > 0 && Number.isInteger(numClasses),\n () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses) as Tensor2D;\n const oneHotPredictions =\n oneHot(cast($predictions, 'int32'), numClasses) as Tensor2D;\n const oneHotLabelsT: Tensor2D = transpose(oneHotLabels);\n const product: Tensor2D = matMul(oneHotLabelsT, oneHotPredictions);\n return cast(product, 'int32');\n}\n\nexport const confusionMatrix = op({confusionMatrix_});\n", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {env} from '../environment';\nimport {FromPixels, FromPixelsAttrs, FromPixelsInputs} from '../kernel_names';\nimport {getKernel, NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor2D, Tensor3D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {PixelData, TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {op} from './operation';\nimport {tensor3d} from './tensor3d';\n\nlet fromPixels2DContext: CanvasRenderingContext2D;\n\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. 
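The implementation above reduces to a matrix product of one-hot encodings; spelling that out reproduces the matrix from the doc example, since entry (r, c) counts examples of actual class r predicted as class c:

```js
const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');

const l = tf.oneHot(labels, 3);
const p = tf.oneHot(predictions, 3);
tf.matMul(l, p, true, false).print();  // same result as tf.math.confusionMatrix
```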
You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @returns A Tensor3D with the shape `[height, width, numChannels]`.\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap,\n numChannels = 3): Tensor3D {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error(\n 'Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n let isImageBitmap = false;\n if ((pixels as PixelData).data instanceof Uint8Array) {\n isPixelData = true;\n } else if (\n typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n } else if (\n typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n } else if (\n typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n } else if ((pixels as any).getContext != null) {\n isCanvasLike = true;\n } else if (\n typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {\n isImageBitmap = true;\n } else {\n throw new Error(\n 'pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${(pixels as {}).constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (isVideo &&\n (pixels as HTMLVideoElement).readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error(\n 'The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the
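Per the readiness check above, a video element must have reached `HAVE_CURRENT_DATA` before its frames can be read. A sketch grabbing an RGBA frame, where `numChannels = 4` keeps the alpha channel that the default of 3 drops:

```js
const video = document.querySelector('video');  // assumed to have loaded data
const frame = tf.browser.fromPixels(video, 4);
console.log(frame.shape);  // [height, width, 4], dtype int32
```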