diff --git a/build/package.json b/build/package.json
index a570de9..73ebb2f 100644
--- a/build/package.json
+++ b/build/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@vladmandic/face-api",
-  "version": "0.7.4",
+  "version": "0.8.1",
   "description": "JavaScript module for Face Detection and Face Recognition Using Tensorflow/JS",
   "main": "dist/face-api.cjs",
   "module": "dist/face-api.esm.js",
diff --git a/build/src/faceFeatureExtractor/FaceFeatureExtractor.js b/build/src/faceFeatureExtractor/FaceFeatureExtractor.js
index ab01e38..c4551a6 100644
--- a/build/src/faceFeatureExtractor/FaceFeatureExtractor.js
+++ b/build/src/faceFeatureExtractor/FaceFeatureExtractor.js
@@ -15,7 +15,7 @@ export class FaceFeatureExtractor extends NeuralNetwork {
             throw new Error('FaceFeatureExtractor - load model before inference');
         }
         return tf.tidy(() => {
-            const batchTensor = input.toBatchTensor(112, true);
+            const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
             const meanRgb = [122.782, 117.001, 104.298];
             const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255));
             let out = denseBlock4(normalized, params.dense0, true);
diff --git a/build/src/faceFeatureExtractor/FaceFeatureExtractor.js.map b/build/src/faceFeatureExtractor/FaceFeatureExtractor.js.map
index 03286b3..5aa3c9c 100644
--- a/build/src/faceFeatureExtractor/FaceFeatureExtractor.js.map
+++ b/build/src/faceFeatureExtractor/FaceFeatureExtractor.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
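Note: the recurring change in this patch is pinning tensor dtypes to float32, either by casting the batch tensor with tf.cast or by passing an explicit dtype to tf.fill. A minimal standalone sketch of the cast step, assuming stock @tensorflow/tfjs (the hand-built int32 tensor stands in for decoded pixel data, which tf.browser.fromPixels returns as int32):

import * as tf from '@tensorflow/tfjs';

// Stand-in for a batched 2x2 RGB crop as produced by toBatchTensor(): pixel data arrives as int32.
const batch = tf.tensor4d([...Array(12).keys()], [1, 2, 2, 3], 'int32');

// Casting up front keeps the mean subtraction and the /255 scaling in float32,
// mirroring the patched forwardInput() above.
const floatBatch = tf.cast(batch, 'float32');
const normalized = floatBatch.sub(tf.tensor1d([122.782, 117.001, 104.298])).div(tf.scalar(255));
console.log(normalized.dtype); // 'float32'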
diff --git a/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js b/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js
index cd59a41..b43da07 100644
--- a/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js
+++ b/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js
@@ -15,7 +15,7 @@ export class TinyFaceFeatureExtractor extends NeuralNetwork {
             throw new Error('TinyFaceFeatureExtractor - load model before inference');
         }
         return tf.tidy(() => {
-            const batchTensor = input.toBatchTensor(112, true);
+            const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
             const meanRgb = [122.782, 117.001, 104.298];
             const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(255));
             let out = denseBlock3(normalized, params.dense0, true);
diff --git a/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js.map b/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js.map
index 44dca6e..e56255a 100644
--- a/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js.map
+++ b/build/src/faceFeatureExtractor/TinyFaceFeatureExtractor.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
diff --git a/build/src/faceLandmarkNet/FaceLandmark68NetBase.js b/build/src/faceLandmarkNet/FaceLandmark68NetBase.js
index db71f0a..fa494c6 100644
--- a/build/src/faceLandmarkNet/FaceLandmark68NetBase.js
+++ b/build/src/faceLandmarkNet/FaceLandmark68NetBase.js
@@ -15,10 +15,7 @@ export class FaceLandmark68NetBase extends FaceProcessor {
         });
         const batchSize = inputDimensions.length;
         return tf.tidy(() => {
-            const createInterleavedTensor = (fillX, fillY) => tf.stack([
-                tf.fill([68], fillX),
-                tf.fill([68], fillY)
-            ], 1).as2D(1, 136).as1D();
+            const createInterleavedTensor = (fillX, fillY) => tf.stack([tf.fill([68], fillX, 'float32'), tf.fill([68], fillY, 'float32')], 1).as2D(1, 136).as1D();
             const getPadding = (batchIdx, cond) => {
                 const { width, height } = inputDimensions[batchIdx];
                 return cond(width, height) ? Math.abs(width - height) / 2 : 0;
@@ -26,7 +23,7 @@ export class FaceLandmark68NetBase extends FaceProcessor {
             const getPaddingX = (batchIdx) => getPadding(batchIdx, (w, h) => w < h);
             const getPaddingY = (batchIdx) => getPadding(batchIdx, (w, h) => h < w);
             const landmarkTensors = output
-                .mul(tf.fill([batchSize, 136], inputSize))
+                .mul(tf.fill([batchSize, 136], inputSize, 'float32'))
                 .sub(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(getPaddingX(batchIdx), getPaddingY(batchIdx)))))
                 .div(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(inputDimensions[batchIdx].width, inputDimensions[batchIdx].height))));
             return landmarkTensors;
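The createInterleavedTensor helper touched above packs a constant x offset and y offset into the [x0, y0, x1, y1, ...] layout of the 136-value landmark output; the new 'float32' arguments keep those fill tensors in the same dtype as the network output they scale and shift. A small sketch of just that helper, runnable against stock @tensorflow/tfjs (the offsets 12 and 0 are arbitrary stand-ins for one batch item's horizontal and vertical padding):

import * as tf from '@tensorflow/tfjs';

// Interleave one x offset and one y offset across 68 landmark points (2 * 68 = 136 values).
const createInterleavedTensor = (fillX: number, fillY: number) =>
  tf.stack([tf.fill([68], fillX, 'float32'), tf.fill([68], fillY, 'float32')], 1)
    .as2D(1, 136)
    .as1D();

const offsets = createInterleavedTensor(12, 0);
console.log(offsets.shape);  // [136]
offsets.slice(0, 4).print(); // [12, 0, 12, 0]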
diff --git a/build/src/faceLandmarkNet/FaceLandmark68NetBase.js.map b/build/src/faceLandmarkNet/FaceLandmark68NetBase.js.map
index aa67b20..7f353bd 100644
--- a/build/src/faceLandmarkNet/FaceLandmark68NetBase.js.map
+++ b/build/src/faceLandmarkNet/FaceLandmark68NetBase.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
diff --git a/build/src/ops/normalize.js b/build/src/ops/normalize.js
index 155051c..2f94096 100644
--- a/build/src/ops/normalize.js
+++ b/build/src/ops/normalize.js
@@ -2,9 +2,9 @@ import * as tf from '@tensorflow/tfjs/dist/tf.es2017.js';
 export function normalize(x, meanRgb) {
     return tf.tidy(() => {
         const [r, g, b] = meanRgb;
-        const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r);
-        const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g);
-        const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b);
+        const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r, 'float32');
+        const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g, 'float32');
+        const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b, 'float32');
         const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3);
         return tf.sub(x, avg_rgb);
     });
diff --git a/build/src/ops/normalize.js.map b/build/src/ops/normalize.js.map
index c9811f4..c197d3f 100644
--- a/build/src/ops/normalize.js.map
+++ b/build/src/ops/normalize.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
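normalize() builds one constant plane per channel and concatenates them along the channel axis before subtracting; with the explicit dtype the mean tensor is guaranteed to be float32. A compact sketch of the same idea, assuming stock @tensorflow/tfjs and a dummy NHWC input:

import * as tf from '@tensorflow/tfjs';

// Dummy NHWC batch and the RGB means used throughout this repo.
const x = tf.ones([1, 4, 4, 3], 'float32');
const meanRgb = [122.782, 117.001, 104.298];

// One filled [N, H, W, 1] plane per channel, concatenated on axis 3, then subtracted.
const planes = meanRgb.map((m) => tf.fill([1, 4, 4, 1], m, 'float32'));
const centered = tf.sub(x, tf.concat(planes, 3));
console.log(centered.shape, centered.dtype); // [1, 4, 4, 3] 'float32'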
diff --git a/build/src/ops/padToSquare.js b/build/src/ops/padToSquare.js
index 3f01e61..f544b9f 100644
--- a/build/src/ops/padToSquare.js
+++ b/build/src/ops/padToSquare.js
@@ -19,7 +19,7 @@ export function padToSquare(imgTensor, isCenterImage = false) {
         const createPaddingTensor = (paddingAmount) => {
             const paddingTensorShape = imgTensor.shape.slice();
             paddingTensorShape[paddingAxis] = paddingAmount;
-            return tf.fill(paddingTensorShape, 0);
+            return tf.fill(paddingTensorShape, 0, 'float32');
         };
         const paddingTensorAppend = createPaddingTensor(paddingAmount);
         const remainingPaddingAmount = dimDiff - paddingTensorAppend.shape[paddingAxis];
diff --git a/build/src/ops/padToSquare.js.map b/build/src/ops/padToSquare.js.map
index c2f85ef..184bd74 100644
--- a/build/src/ops/padToSquare.js.map
+++ b/build/src/ops/padToSquare.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
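padToSquare() concatenates a zero-filled block along the shorter image axis; the explicit 'float32' keeps that padding in the same dtype as the image tensor it is joined to. A simplified sketch of the append-only case, using a 3D tensor instead of the 4D batch tensor the library works on, assuming stock @tensorflow/tfjs:

import * as tf from '@tensorflow/tfjs';

// A 4 (height) x 6 (width) x 3 (channels) dummy image.
const img = tf.zeros([4, 6, 3], 'float32');
const [height, width] = img.shape;

// Pad whichever dimension is smaller until the tensor is square.
const paddingAxis = height < width ? 0 : 1;
const paddingShape = img.shape.slice();
paddingShape[paddingAxis] = Math.abs(height - width);
const padded = tf.concat([img, tf.fill(paddingShape, 0, 'float32')], paddingAxis);
console.log(padded.shape); // [6, 6, 3]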
diff --git a/build/src/xception/TinyXception.js b/build/src/xception/TinyXception.js
index 9cb3ecb..3a6ce2e 100644
--- a/build/src/xception/TinyXception.js
+++ b/build/src/xception/TinyXception.js
@@ -35,7 +35,7 @@ export class TinyXception extends NeuralNetwork {
             throw new Error('TinyXception - load model before inference');
         }
         return tf.tidy(() => {
-            const batchTensor = input.toBatchTensor(112, true);
+            const batchTensor = tf.cast(input.toBatchTensor(112, true), 'float32');
             const meanRgb = [122.782, 117.001, 104.298];
             const normalized = normalize(batchTensor, meanRgb).div(tf.scalar(256));
             let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2]));
diff --git a/build/src/xception/TinyXception.js.map b/build/src/xception/TinyXception.js.map
index a08eb62..d7d1e30 100644
--- a/build/src/xception/TinyXception.js.map
+++ b/build/src/xception/TinyXception.js.map
@@ -1 +1 @@
[single-line source map regenerated for the change above; mappings payload omitted]
diff --git a/dist/face-api.cjs b/dist/face-api.cjs
index d5a95e5..2043fa4 100644
--- a/dist/face-api.cjs
+++ b/dist/face-api.cjs
@@ -1,39 +1,39 @@
[regenerated minified bundle; hunk body omitted, and truncated in this excerpt]
${L.message}`,"system",L);l[mr].error=x})}xn.prototype={get body(){return this[mr].body},get bodyUsed(){return this[mr].disturbed},arrayBuffer(){return Nc.call(this).then(function(r){return r.buffer.slice(r.byteOffset,r.byteOffset+r.byteLength)})},blob(){let r=this.headers&&this.headers.get("content-type")||"";return Nc.call(this).then(function(l){return Object.assign(new fu([],{type:r.toLowerCase()}),{[pr]:l})})},json(){var r=this;return Nc.call(this).then(function(l){try{return JSON.parse(l.toString())}catch(h){return xn.Promise.reject(new Mn(`invalid json response body at ${r.url} reason: ${h.message}`,"invalid-json"))}})},text(){return Nc.call(this).then(function(r){return r.toString()})},buffer(){return Nc.call(this)},textConverted(){var r=this;return Nc.call(this).then(function(l){return LX(l,r.headers)})}};Object.defineProperties(xn.prototype,{body:{enumerable:!0},bodyUsed:{enumerable:!0},arrayBuffer:{enumerable:!0},blob:{enumerable:!0},json:{enumerable:!0},text:{enumerable:!0}});xn.mixIn=function(r){for(const l of Object.getOwnPropertyNames(xn.prototype))if(!(l in r)){const h=Object.getOwnPropertyDescriptor(xn.prototype,l);Object.defineProperty(r,l,h)}};function Nc(){var r=this;if(this[mr].disturbed)return xn.Promise.reject(new TypeError(`body used already for: ${this.url}`));if(this[mr].disturbed=!0,this[mr].error)return xn.Promise.reject(this[mr].error);let l=this.body;if(l===null)return xn.Promise.resolve(Buffer.alloc(0));if(gu(l)&&(l=l.stream()),Buffer.isBuffer(l))return xn.Promise.resolve(l);if(!(l instanceof Ys.default))return xn.Promise.resolve(Buffer.alloc(0));let h=[],d=0,f=!1;return new xn.Promise(function(g,S){let L;r.timeout&&(L=setTimeout(function(){f=!0,S(new Mn(`Response timeout while trying to fetch ${r.url} (over ${r.timeout}ms)`,"body-timeout"))},r.timeout)),l.on("error",function(x){x.name==="AbortError"?(f=!0,S(x)):S(new Mn(`Invalid response body while trying to fetch ${r.url}: ${x.message}`,"system",x))}),l.on("data",function(x){if(f||x===null)return;if(r.size&&d+x.length>r.size){f=!0,S(new Mn(`content size at ${r.url} over limit: ${r.size}`,"max-size"));return}d+=x.length,h.push(x)}),l.on("end",function(){if(f)return;clearTimeout(L);try{g(Buffer.concat(h,d))}catch(x){S(new Mn(`Could not create Buffer from response body for ${r.url}: ${x.message}`,"system",x))}})})}function LX(r,l){if(typeof ES!="function")throw new Error("The package `encoding` must be installed to use the textConverted() function");const h=l.get("content-type");let d="utf-8",f,g;return h&&(f=/charset=([^;]*)/i.exec(h)),g=r.slice(0,1024).toString(),!f&&g&&(f=/0&&arguments[0]!==void 0?arguments[0]:void 0;if(this[nn]=Object.create(null),r instanceof fi){const l=r.raw(),h=Object.keys(l);for(const d of h)for(const f of l[d])this.append(d,f);return}if(!(r==null))if(typeof r=="object"){const l=r[Symbol.iterator];if(l!=null){if(typeof l!="function")throw new TypeError("Header pairs must be iterable");const h=[];for(const d of r){if(typeof d!="object"||typeof d[Symbol.iterator]!="function")throw new TypeError("Each header pair must be iterable");h.push(Array.from(d))}for(const d of h){if(d.length!==2)throw new TypeError("Each header pair must be a name/value tuple");this.append(d[0],d[1])}}else for(const h of Object.keys(r)){const d=r[h];this.append(h,d)}}else throw new TypeError("Provided initializer must be an object")}get(r){r=`${r}`,yu(r);const l=Cc(this[nn],r);return l===void 0?null:this[nn][l].join(", ")}forEach(r){let l=arguments.length>1&&arguments[1]!==void 0?arguments[1]:void 
0,h=kS(this),d=0;for(;d1&&arguments[1]!==void 0?arguments[1]:"key+value";const h=Object.keys(r[nn]).sort();return h.map(l==="key"?function(d){return d.toLowerCase()}:l==="value"?function(d){return r[nn][d].join(", ")}:function(d){return[d.toLowerCase(),r[nn][d].join(", ")]})}const _S=Symbol("internal");function FS(r,l){const h=Object.create(WS);return h[_S]={target:r,kind:l,index:0},h}const WS=Object.setPrototypeOf({next(){if(!this||Object.getPrototypeOf(this)!==WS)throw new TypeError("Value of `this` is not a HeadersIterator");var r=this[_S];const l=r.target,h=r.kind,d=r.index,f=kS(l,h),g=f.length;return d>=g?{value:void 0,done:!0}:(this[_S].index=d+1,{value:f[d],done:!1})}},Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));Object.defineProperty(WS,Symbol.toStringTag,{value:"HeadersIterator",writable:!1,enumerable:!1,configurable:!0});function IX(r){const l=Object.assign({__proto__:null},r[nn]),h=Cc(r[nn],"Host");return h!==void 0&&(l[h]=l[h][0]),l}function xX(r){const l=new fi;for(const h of Object.keys(r)){if(t2.test(h))continue;if(Array.isArray(r[h]))for(const d of r[h]){if(DS.test(d))continue;l[nn][h]===void 0?l[nn][h]=[d]:l[nn][h].push(d)}else DS.test(r[h])||(l[nn][h]=[r[h]])}return l}const so=Symbol("Response internals"),TX=RS.default.STATUS_CODES;class Pi{constructor(){let r=arguments.length>0&&arguments[0]!==void 0?arguments[0]:null,l=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{};xn.call(this,r,l);const h=l.status||200,d=new fi(l.headers);if(r!=null&&!d.has("Content-Type")){const f=QC(r);f&&d.append("Content-Type",f)}this[so]={url:l.url,status:h,statusText:l.statusText||TX[h],headers:d,counter:l.counter}}get url(){return this[so].url||""}get status(){return this[so].status}get ok(){return this[so].status>=200&&this[so].status<300}get redirected(){return this[so].counter>0}get statusText(){return this[so].statusText}get headers(){return this[so].headers}clone(){return new Pi(ZC(this),{url:this.url,status:this.status,statusText:this.statusText,headers:this.headers,ok:this.ok,redirected:this.redirected})}}xn.mixIn(Pi.prototype);Object.defineProperties(Pi.prototype,{url:{enumerable:!0},status:{enumerable:!0},ok:{enumerable:!0},redirected:{enumerable:!0},statusText:{enumerable:!0},headers:{enumerable:!0},clone:{enumerable:!0}});Object.defineProperty(Pi.prototype,Symbol.toStringTag,{value:"Response",writable:!1,enumerable:!1,configurable:!0});const fr=Symbol("Request internals"),$S=Cm.default.parse,AX=Cm.default.format,vX="destroy"in Ys.default.Readable.prototype;function Rm(r){return typeof r=="object"&&typeof r[fr]=="object"}function NX(r){const l=r&&typeof r=="object"&&Object.getPrototypeOf(r);return!!(l&&l.constructor.name==="AbortSignal")}class Zo{constructor(r){let l=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},h;Rm(r)?h=$S(r.url):(r&&r.href?h=$S(r.href):h=$S(`${r}`),r={});let d=l.method||r.method||"GET";if(d=d.toUpperCase(),(l.body!=null||Rm(r)&&r.body!==null)&&(d==="GET"||d==="HEAD"))throw new TypeError("Request with GET/HEAD method cannot have body");let f=l.body!=null?l.body:Rm(r)&&r.body!==null?ZC(r):null;xn.call(this,f,{timeout:l.timeout||r.timeout||0,size:l.size||r.size||0});const g=new fi(l.headers||r.headers||{});if(f!=null&&!g.has("Content-Type")){const L=QC(f);L&&g.append("Content-Type",L)}let S=Rm(r)?r.signal:null;if("signal"in l&&(S=l.signal),S!=null&&!NX(S))throw new TypeError("Expected signal to be an instanceof 
AbortSignal");this[fr]={method:d,redirect:l.redirect||r.redirect||"follow",headers:g,parsedURL:h,signal:S},this.follow=l.follow!==void 0?l.follow:r.follow!==void 0?r.follow:20,this.compress=l.compress!==void 0?l.compress:r.compress!==void 0?r.compress:!0,this.counter=l.counter||r.counter||0,this.agent=l.agent||r.agent}get method(){return this[fr].method}get url(){return AX(this[fr].parsedURL)}get headers(){return this[fr].headers}get redirect(){return this[fr].redirect}get signal(){return this[fr].signal}clone(){return new Zo(this)}}xn.mixIn(Zo.prototype);Object.defineProperty(Zo.prototype,Symbol.toStringTag,{value:"Request",writable:!1,enumerable:!1,configurable:!0});Object.defineProperties(Zo.prototype,{method:{enumerable:!0},url:{enumerable:!0},headers:{enumerable:!0},redirect:{enumerable:!0},clone:{enumerable:!0},signal:{enumerable:!0}});function CX(r){const l=r[fr].parsedURL,h=new fi(r[fr].headers);if(h.has("Accept")||h.set("Accept","*/*"),!l.protocol||!l.hostname)throw new TypeError("Only absolute URLs are supported");if(!/^https?:$/.test(l.protocol))throw new TypeError("Only HTTP(S) protocols are supported");if(r.signal&&r.body instanceof Ys.default.Readable&&!vX)throw new Error("Cancellation of streamed requests with AbortSignal is not supported in node < 8");let d=null;if(r.body==null&&/^(POST|PUT)$/i.test(r.method)&&(d="0"),r.body!=null){const g=e2(r);typeof g=="number"&&(d=String(g))}d&&h.set("Content-Length",d),h.has("User-Agent")||h.set("User-Agent","node-fetch/1.0 (+https://github.com/bitinn/node-fetch)"),r.compress&&!h.has("Accept-Encoding")&&h.set("Accept-Encoding","gzip,deflate");let f=r.agent;return typeof f=="function"&&(f=f(l)),!h.has("Connection")&&!f&&h.set("Connection","close"),Object.assign({},l,{method:r.method,headers:IX(h),agent:f})}function bu(r){Error.call(this,r),this.type="aborted",this.message=r,Error.captureStackTrace(this,this.constructor)}bu.prototype=Object.create(Error.prototype);bu.prototype.constructor=bu;bu.prototype.name="AbortError";const s2=Ys.default.PassThrough,RX=Cm.default.resolve;function io(r,l){if(!io.Promise)throw new Error("native promise missing, set fetch.Promise to your favorite alternative");return xn.Promise=io.Promise,new io.Promise(function(h,d){const f=new Zo(r,l),g=CX(f),S=(g.protocol==="https:"?KC.default:RS.default).request,L=f.signal;let x=null;const A=function(){let te=new bu("The user aborted a request.");if(d(te),f.body&&f.body instanceof Ys.default.Readable&&f.body.destroy(te),!x||!x.body)return;x.body.emit("error",te)};if(L&&L.aborted){A();return}const O=function(){A(),z()},C=S(g);let $;L&&L.addEventListener("abort",O);function z(){C.abort(),L&&L.removeEventListener("abort",O),clearTimeout($)}f.timeout&&C.once("socket",function(ne){$=setTimeout(function(){d(new Mn(`network timeout at: ${f.url}`,"request-timeout")),z()},f.timeout)}),C.on("error",function(ne){d(new Mn(`request to ${f.url} failed, reason: ${ne.message}`,"system",ne)),z()}),C.on("response",function(ne){clearTimeout($);const te=xX(ne.headers);if(io.isRedirect(ne.statusCode)){const xe=te.get("Location"),Me=xe===null?null:RX(f.url,xe);switch(f.redirect){case"error":d(new Mn(`uri requested responds with a redirect, redirect mode is set to error: ${f.url}`,"no-redirect")),z();return;case"manual":if(Me!==null)try{te.set("Location",Me)}catch(wt){d(wt)}break;case"follow":if(Me===null)break;if(f.counter>=f.follow){d(new Mn(`maximum redirect reached at: ${f.url}`,"max-redirect")),z();return}const Ke={headers:new 
fi(f.headers),follow:f.follow,counter:f.counter+1,agent:f.agent,compress:f.compress,method:f.method,body:f.body,signal:f.signal,timeout:f.timeout,size:f.size};if(ne.statusCode!==303&&f.body&&e2(f)===null){d(new Mn("Cannot follow redirect with body being a readable stream","unsupported-redirect")),z();return}(ne.statusCode===303||(ne.statusCode===301||ne.statusCode===302)&&f.method==="POST")&&(Ke.method="GET",Ke.body=void 0,Ke.headers.delete("content-length")),h(io(new Zo(Me,Ke))),z();return}}ne.once("end",function(){L&&L.removeEventListener("abort",O)});let se=ne.pipe(new s2);const fe={url:f.url,status:ne.statusCode,statusText:ne.statusMessage,headers:te,size:f.size,timeout:f.timeout,counter:f.counter},de=te.get("Content-Encoding");if(!f.compress||f.method==="HEAD"||de===null||ne.statusCode===204||ne.statusCode===304){x=new Pi(se,fe),h(x);return}const Ae={flush:no.default.Z_SYNC_FLUSH,finishFlush:no.default.Z_SYNC_FLUSH};if(de=="gzip"||de=="x-gzip"){se=se.pipe(no.default.createGunzip(Ae)),x=new Pi(se,fe),h(x);return}if(de=="deflate"||de=="x-deflate"){const xe=ne.pipe(new s2);xe.once("data",function(Me){(Me[0]&15)===8?se=se.pipe(no.default.createInflate()):se=se.pipe(no.default.createInflateRaw()),x=new Pi(se,fe),h(x)});return}if(de=="br"&&typeof no.default.createBrotliDecompress=="function"){se=se.pipe(no.default.createBrotliDecompress()),x=new Pi(se,fe),h(x);return}x=new Pi(se,fe),h(x)}),SX(C,f)})}io.isRedirect=function(r){return r===301||r===302||r===303||r===307||r===308};io.Promise=global.Promise;var OX=io});var Je=CS((Om,r2)=>{(function(r,l){typeof Om=="object"&&typeof r2!="undefined"?l(Om):typeof define=="function"&&define.amd?define(["exports"],l):(r=r||self,l(r.tf=r.tf||{}))})(Om,function(r){"use strict";const l=1e-7,h=1e-4;class d{constructor(e,t){this.backend=e,this.dataMover=t,this.data=new WeakMap,this.dataIdsCount=0}get(e){return this.data.has(e)||this.dataMover.moveData(this.backend,e),this.data.get(e)}set(e,t){this.dataIdsCount++,this.data.set(e,t)}has(e){return this.data.has(e)}delete(e){return this.dataIdsCount--,this.data.delete(e)}numDataIds(){return this.dataIdsCount}}class f{time(e){return g("time")}read(e){return g("read")}readSync(e){return g("readSync")}numDataIds(){return g("numDataIds")}disposeData(e){return g("disposeData")}write(e,t,n){return g("write")}move(e,t,n,s){return g("move")}memory(){return g("memory")}floatPrecision(){return g("floatPrecision")}epsilon(){return this.floatPrecision()===32?l:h}batchMatMul(e,t,n,s){return g("batchMatMul")}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){return g("fusedBatchMatMul")}slice(e,t,n){return g("slice")}stridedSlice(e,t,n,s){return g("stridedSlice")}unstack(e,t){return g("unstack")}reverse(e,t){return g("reverse")}concat(e,t){return g("concat")}neg(e){return g("neg")}add(e,t){return g("add")}addN(e){return g("addN")}subtract(e,t){return g("subtract")}multiply(e,t){return g("multiply")}realDivide(e,t){return g("realDivide")}floorDiv(e,t){return g("floorDiv")}sum(e,t){return g("sum")}prod(e,t){return g("prod")}unsortedSegmentSum(e,t,n){return g("unsortedSegmentSum")}argMin(e,t){return g("argMin")}argMax(e,t){return g("argMax")}equal(e,t){return g("equal")}notEqual(e,t){return g("notEqual")}less(e,t){return g("less")}lessEqual(e,t){return g("lessEqual")}greater(e,t){return g("greater")}greaterEqual(e,t){return g("greaterEqual")}logicalNot(e){return g("logicalNot")}logicalAnd(e,t){return g("logicalAnd")}logicalOr(e,t){return g("logicalOr")}where(e){return 
g("where")}select(e,t,n){return g("select")}topk(e,t,n){return g("topk")}min(e,t){return g("min")}minimum(e,t){return g("minimum")}mod(e,t){return g("mod")}max(e,t){return g("max")}maximum(e,t){return g("maximum")}all(e,t){return g("all")}any(e,t){return g("any")}squaredDifference(e,t){return g("squaredDifference")}ceil(e){return g("ceil")}floor(e){return g("floor")}round(e){return g("round")}sign(e){return g("sign")}isNaN(e){return g("isNaN")}isInf(e){return g("isInf")}isFinite(e){return g("isFinite")}pow(e,t){return g("pow")}exp(e){return g("exp")}expm1(e){return g("expm1")}softmax(e,t){return g("softmax")}log(e){return g("log")}log1p(e){return g("log1p")}sqrt(e){return g("sqrt")}rsqrt(e){return g("rsqrt")}square(e){return g("square")}reciprocal(e){return g("reciprocal")}relu(e){return g("relu")}relu6(e){return g("relu6")}prelu(e,t){return g("prelu")}elu(e){return g("elu")}eluDer(e,t){return g("eluDer")}selu(e){return g("selu")}int(e){return g("int")}clip(e,t,n){return g("clip")}abs(e){return g("abs")}complexAbs(e){return g("complexAbs")}sigmoid(e){return g("sigmoid")}softplus(e){return g("softplus")}sin(e){return g("sin")}cos(e){return g("cos")}tan(e){return g("tan")}asin(e){return g("asin")}acos(e){return g("acos")}atan(e){return g("atan")}atan2(e,t){return g("atan2")}sinh(e){return g("sinh")}cosh(e){return g("cosh")}tanh(e){return g("tanh")}asinh(e){return g("asinh")}acosh(e){return g("acosh")}atanh(e){return g("atanh")}erf(e){return g("erf")}step(e,t){return g("step")}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedConv2d")}conv2d(e,t,n){return g("conv2d")}conv2dDerInput(e,t,n){return g("conv2dDerInput")}conv2dDerFilter(e,t,n){return g("conv2dDerFilter")}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedDepthwiseConv2D")}depthwiseConv2D(e,t,n){return g("depthwiseConv2D")}depthwiseConv2DDerInput(e,t,n){return g("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(e,t,n){return g("depthwiseConv2DDerFilter")}conv3d(e,t,n){return g("conv3d")}conv3dDerInput(e,t,n){return g("conv3dDerInput")}conv3dDerFilter(e,t,n){return g("conv3dDerFilter")}maxPool(e,t){return g("maxPool")}maxPoolBackprop(e,t,n,s){return g("maxPoolBackprop")}avgPool(e,t){return g("avgPool")}avgPoolBackprop(e,t,n){return g("avgPoolBackprop")}avgPool3d(e,t){return g("avgPool3d")}avgPool3dBackprop(e,t,n){return g("avgPool3dBackprop")}maxPool3d(e,t){return g("maxPool3d")}maxPool3dBackprop(e,t,n,s){return g("maxPool3dBackprop")}reshape(e,t){return g("reshape")}cast(e,t){return g("cast")}tile(e,t){return g("tile")}pad(e,t,n){return g("pad")}transpose(e,t){return g("transpose")}gather(e,t,n){return g("gather")}gatherND(e,t){return g("gatherND")}scatterND(e,t,n){return g("scatterND")}batchToSpaceND(e,t,n){return g("batchToSpaceND")}spaceToBatchND(e,t,n){return g("spaceToBatchND")}resizeBilinear(e,t,n,s){return g("resizeBilinear")}resizeBilinearBackprop(e,t,n){return g("resizeBilinearBackprop")}resizeNearestNeighbor(e,t,n,s){return g("resizeNearestNeighbor")}resizeNearestNeighborBackprop(e,t,n){return g("resizeNearestNeighborBackprop")}batchNorm(e,t,n,s,i,o){return g("batchNorm")}localResponseNormalization4D(e,t,n,s,i){return g("localResponseNormalization4D")}LRNGrad(e,t,n,s,i,o,a){return g("LRNGrad")}multinomial(e,t,n,s){return g("multinomial")}oneHot(e,t,n,s){return g("oneHot")}cumsum(e,t,n,s){return g("cumsum")}nonMaxSuppression(e,t,n,s,i){return g("nonMaxSuppression")}fft(e){return g("fft")}ifft(e){return 
g("ifft")}complex(e,t){return g("complex")}real(e){return g("real")}imag(e){return g("imag")}cropAndResize(e,t,n,s,i,o){return g("cropAndResize")}depthToSpace(e,t,n){return g("depthToSpace")}split(e,t,n){return g("split")}sparseToDense(e,t,n,s){return g("sparseToDense")}diag(e){return g("diag")}fill(e,t,n){return g("fill")}onesLike(e){return g("onesLike")}zerosLike(e){return g("zerosLike")}linspace(e,t,n){return g("linspace")}dispose(){return g("dispose")}}function g(e){throw new Error(`'${e}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}const S="tfjsflags";class L{constructor(e){this.global=e,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(e,t){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${t}.`),this.platformName=e,this.platform=t}registerFlag(e,t,n){if(this.flagRegistry[e]={evaluationFn:t,setHook:n},this.urlFlags[e]!=null){const s=this.urlFlags[e];console.warn(`Setting feature override from URL ${e}: ${s}.`),this.set(e,s)}}async getAsync(e){return e in this.flags?this.flags[e]:(this.flags[e]=await this.evaluateFlag(e),this.flags[e])}get(e){if(e in this.flags)return this.flags[e];const t=this.evaluateFlag(e);if(t instanceof Promise)throw new Error(`Flag ${e} cannot be synchronously evaluated. Please use getAsync() instead.`);return this.flags[e]=t,this.flags[e]}getNumber(e){return this.get(e)}getBool(e){return this.get(e)}getFlags(){return this.flags}get features(){return this.flags}set(e,t){if(this.flagRegistry[e]==null)throw new Error(`Cannot set flag ${e} as it has not been registered.`);this.flags[e]=t,this.flagRegistry[e].setHook!=null&&this.flagRegistry[e].setHook(t)}evaluateFlag(e){if(this.flagRegistry[e]==null)throw new Error(`Cannot evaluate flag '${e}': no evaluation function found.`);return this.flagRegistry[e].evaluationFn()}setFlags(e){this.flags=Object.assign({},e)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const e=x(this.global.location.search);if(S in e){const t=e[S].split(",");t.forEach(n=>{const[s,i]=n.split(":");this.urlFlags[s]=O(s,i)})}}}function x(e){const t={};return e.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(n,...s)=>(A(t,s[0],s[1]),s.join("="))),t}function A(e,t,n){e[decodeURIComponent(t)]=decodeURIComponent(n||"")}function O(e,t){if(t=t.toLowerCase(),t==="true"||t==="false")return t==="true";if(`${+t}`===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${e}.`)}function C(){return r.ENV}r.ENV=null;function $(e){r.ENV=e}let z;function ne(){if(z==null){let e;if(typeof window!="undefined")e=window;else if(typeof global!="undefined")e=global;else if(typeof process!="undefined")e=process;else if(typeof self!="undefined")e=self;else throw new Error("Could not find a global object");z=e}return z}function te(){const e=ne();return e._tfGlobals==null&&(e._tfGlobals=new Map),e._tfGlobals}function se(e,t){const n=te();if(n.has(e))return n.get(e);{const s=t();return n.set(e,s),n.get(e)}}const 
fe="Abs",de="Acos",Ae="Acosh",xe="Add",Me="AddN",Ke="All",wt="Any",$t="ArgMax",Kt="ArgMin",Fn="Asin",vn="Asinh",Nn="Atan",Qs="Atanh",Ai="Atan2",ei="AvgPool",xa="AvgPoolBackprop",hl="AvgPool3D",Cx="AvgPool3DBackprop",vg="BatchMatMul",Ng="BatchToSpaceND",Cg="BroadcastTo",ul="Cast",dl="Ceil",pl="ClipByValue",Rg="Complex",td="Concat",Og="Conv2D",Rx="Conv2DBackpropFilter",Eg="Conv2DBackpropInput",Dg="Conv3D",Ox="Conv3DBackpropFilterV2",Ex="Conv3DBackpropInputV2",Ta="Cos",ml="Cosh",kg="Cumsum",Dx="CropAndResize",kx="DepthToSpace",Fg="DepthwiseConv2dNative",Fx="DepthwiseConv2dNativeBackpropFilter",_x="DepthwiseConv2dNativeBackpropInput",Wx="Diag",nd="Dilation2D",sd="Dilation2DBackpropInput",id="Dilation2DBackpropFilter",Aa="Div",fl="Elu",$x="EluGrad",gl="Erf",Ux="Equal",yl="Exp",bl="Expm1",_g="FFT",Bx="Fill",rd="FlipLeftRight",wl="Floor",Wg="FloorDiv",Ll="FusedBatchNorm",$g="GatherV2",Mx="GatherNd",Px="Greater",Ug="GreaterEqual",Sl="Identity",Bg="IFFT",Mg="Imag",Il="IsFinite",xl="IsInf",Tl="IsNan",zx="Less",Gx="LessEqual",Vx="LinSpace",Al="Log",vl="Log1p",Hx="LogicalAnd",od="LogicalNot",Yx="LogicalOr",Pg="LogSoftmax",zg="LRN",qx="LRNBackprop",Nl="Max",Gg="Maximum",Cl="MaxPool",ad="MaxPoolBackprop",Vg="MaxPool3D",jx="MaxPool3DBackprop",cd="MaxPoolWithArgmax",VD="Mean",Hg="Min",Yg="Minimum",qg="Mod",Rl="Multiply",jg="Negate",ld="NotEqual",Kg="NonMaxSuppressionV3",hd="NonMaxSuppressionV4",ud="NonMaxSuppressionV5",Xg="OnesLike",Jg="OneHot",dd="PadV2",HD="Pool",Zg="Pow",Qg="Prelu",Kx="Prod",Xx="Range",ey="Real",Ol="Reciprocal",ty="Relu",El="Reshape",ny="ResizeNearestNeighbor",Jx="ResizeNearestNeighborGrad",sy="ResizeBilinear",Zx="ResizeBilinearGrad",iy="Relu6",ry="Reverse",Dl="Round",kl="Rsqrt",Qx="ScatterNd",oy="SelectV2",Fl="Selu",pd="Slice",va="Sin",_l="Sinh",Wl="Sign",$l="Sigmoid",Ul="Softplus",Bl="Sqrt",ay="Sum",md="SpaceToBatchND",cy="SplitV",ly="Softmax",Na="SquaredDifference",fd="Square",Ml="Sub",eT="SparseToDense",tT="StridedSlice",Ca="Tan",Pl="Tanh",hy="Tile",nT="TopK",zl="Transpose",gd="Unique",uy="Unpack",dy="UnsortedSegmentSum",py="ZerosLike",Gl="Step",yd="FromPixels",bd="RotateWithOffset",my="_FusedMatMul",fy="FusedConv2D",gy="FusedDepthwiseConv2D";const Ra=se("kernelRegistry",()=>new Map),Vl=se("gradRegistry",()=>new Map);function yy(e,t){const n=wy(e,t);return Ra.get(n)}function by(e){return Vl.get(e)}function wd(e){const t=Ra.entries(),n=[];for(;;){const{done:s,value:i}=t.next();if(s)break;const[o,a]=i,[c]=o.split("_");c===e&&n.push(a)}return n}function Ld(e){const{kernelName:t,backendName:n}=e,s=wy(t,n);Ra.has(s)&&console.warn(`The kernel '${t}' for backend '${n}' is already registered`),Ra.set(s,e)}function sT(e){const{kernelName:t}=e;Vl.has(t)&&(C().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`)),Vl.set(t,e)}function YD(e,t){const n=wy(e,t);if(!Ra.has(n))throw new Error(`The kernel '${e}' for backend '${t}' is not registered`);Ra.delete(n)}function qD(e){if(!Vl.has(e))throw new Error(`The gradient '${e}' for backend is not registered`);Vl.delete(e)}function jD(e,t){const n=wd(e);n.forEach(s=>{const i=Object.assign({},s,{backendName:t});Ld(i)})}function wy(e,t){return`${t}_${e}`}function Ly(e){let t=e.length,n=0,s=0;for(;t>0;)s=Math.random()*t|0,t--,n=e[t],e[t]=e[s],e[s]=n}function Hl(e,t,n){return Math.max(e,Math.min(t,n))}function Sy(e){return e%2===0?e:e+1}function iT(e){let t=0;for(let n=0;nn+` Shapes ${e} and ${t} must match`)}function wo(e){k(e!=null,()=>"The input to the tensor constructor must be a non-null value.")}function 
Ji(e,t=[],n=!1){if(t==null&&(t=[]),Array.isArray(e)||Ln(e)&&!n)for(let s=0;s0,n){return new Promise((s,i)=>{let o=0;const a=()=>{if(e()){s();return}o++;const c=t(o);if(n!=null&&o>=n){i();return}setTimeout(a,c)};a()})}function Id(e,t){let n=1,s=-1;for(let o=0;o=0)n*=e[o];else if(e[o]===-1){if(s!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${s} and dim ${o}`);s=o}else if(e[o]<0)throw Error(`Shapes can not be < 0. Found ${e[o]} at dim ${o}`);if(s===-1){if(t>0&&t!==n)throw Error(`Size(${t}) must match the product of shape ${e}`);return e}if(n===0)throw Error(`Cannot infer the missing size in [${e}] when there are 0 elements`);if(t%n!==0)throw Error(`The implicit shape can't be a fractional number. Got ${t} / ${n}`);const i=e.slice();return i[s]=t/n,i}function ft(e,t){const n=t.length;return e=e==null?t.map((s,i)=>i):[].concat(e),k(e.every(s=>s>=-n&&s`All values in axis param must be in range [-${n}, ${n}) but got axis ${e}`),k(e.every(s=>Ut(s)),()=>`All values in axis param must be integers but got axis ${e}`),e.map(s=>s<0?n+s:s)}function Rr(e,t){const n=[],s=[],i=t!=null&&Array.isArray(t)&&t.length===0,o=t==null||i?null:ft(t,e).sort();let a=0;for(let c=0;cc)&&e[c]===1&&(n.push(e[c]),s.push(c)),o[a]<=c&&a++}e[c]!==1&&(n.push(e[c]),s.push(c))}return{newShape:n,keptDims:s}}function wn(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else throw new Error(`Unknown data type ${e}`);return n}function So(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else if(e==="string")n=new Array(t);else throw new Error(`Unknown data type ${e}`);return n}function rT(e,t){for(let n=0;nt+=n.length),t}function Or(e){return typeof e=="string"||e instanceof String}function cT(e){return typeof e=="boolean"}function xd(e){return typeof e=="number"}function Oa(e){return Array.isArray(e)?Oa(e[0]):e instanceof Float32Array?"float32":e instanceof Int32Array||e instanceof Uint8Array?"int32":xd(e)?"float32":Or(e)?"string":cT(e)?"bool":"float32"}function Er(e){return!!(e&&e.constructor&&e.call&&e.apply)}function Td(e,t){for(let n=t;n=0;--s)n[s]=n[s+1]*e[s+1];return n}function lT(e,t){return t==="string"?Cy(e):Dr([e],t)}function Dr(e,t){if(t==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(e)&&(e=Ji(e)),C().getBool("DEBUG")&&rT(e,t),ek(e,t))return e;if(t==null||t==="float32"||t==="complex64")return new Float32Array(e);if(t==="int32")return new Int32Array(e);if(t==="bool"){const n=new Uint8Array(e.length);for(let s=0;sc*u);for(let c=0;cs*i);if(n===0)return[];if(n!==t.length)throw new Error(`[${e}] does not match the input size ${t.length}.`);return hT(0,e,t)}function ek(e,t){return e instanceof Float32Array&&t==="float32"||e instanceof Int32Array&&t==="int32"||e instanceof Uint8Array&&t==="bool"}function Ay(e,t){const n=Ea(e,t);for(let s=0;ss*i,1);if(t==null||t==="float32")return Ls(e,new Float32Array(n));if(t==="int32")return Ls(e,new Int32Array(n));if(t==="bool")return Ls(e,new Uint8Array(n));throw new Error(`Unknown data type ${t}`)}function qn(){return C().platform.now()}function Ny(e){e.forEach(t=>{k(Number.isInteger(t)&&t>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${e}].`)})}function uT(e,t){return C().platform.fetch(e,t)}function Cy(e,t="utf-8"){return t=t||"utf-8",C().platform.encode(e,t)}function Yl(e,t="utf-8"){return 
t=t||"utf-8",C().platform.decode(e,t)}function ti(e,t,n){if(t===0)return 0;if(t===1)return e[0];let s=e[e.length-1];for(let i=0;i{s=n()},o=this.backendTimer.time(i);for(let c=0;c{sk(p,u.dtype,e)})}const a={kernelName:e,outputs:s,inputs:t,timeMs:o.then(c=>c.kernelMs),extraInfo:o.then(c=>c.getExtraProfileInfo!=null?c.getExtraProfileInfo():"")};return a}logKernelProfile(e){const{kernelName:t,outputs:n,timeMs:s,inputs:i,extraInfo:o}=e;n.forEach(a=>{Promise.all([a.data(),s,o]).then(c=>{this.logger.logKernelProfile(t,a,c[0],c[1],i,c[2])})})}}function sk(e,t,n){if(t!=="float32")return!1;for(let s=0;s0?I:""} `}}console.log(`%c${c} %c${a} %c${u}D ${m} %c${p} %c${y} %c${o}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function rk(e,t,n){const s={},i={};for(let u=0;us[T.id]=!0),w=!0,i[p.id]=!0;break}if(w)break}}const o={};o[n.id]=!0;const a={};for(let u=e.length-1;u>=0;u--){const p=e[u],m=p.inputs;for(let y=0;y=0;i--){const o=t[i],a=[];if(o.outputs.forEach(u=>{const p=e[u.id];p!=null?a.push(p):a.push(null)}),o.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${o.kernelName}.`);const c=o.gradient(a);for(const u in o.inputs){if(!(u in c))throw new Error(`Cannot backprop through input ${u}. Available gradients found: ${Object.keys(c)}.`);const p=n(()=>c[u]());if(p.dtype!=="float32")throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input ${u} must have 'float32' dtype, but has '${p.dtype}'`);const m=o.inputs[u];if(!ot(p.shape,m.shape))throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input '${u}' has shape '${p.shape}', which does not match the shape of the input '${m.shape}'`);if(e[m.id]==null)e[m.id]=p;else{const y=e[m.id];e[m.id]=s(y,p),y.dispose()}}}}const dT=20,ql=3,Ry=7;function ak(e,t,n,s){const i=Ot(t),o=ck(e,t,n,i),a=t.length,c=Ad(e,t,n,i,o),u=["Tensor"];return s&&(u.push(` dtype: ${n}`),u.push(` rank: ${a}`),u.push(` shape: [${t}]`),u.push(" values:")),u.push(c.map(p=>" "+p).join(` `)),u.join(` -`)}function ck(e,t,n,s){const i=we(t),o=s[s.length-1],a=new Array(o).fill(0),c=t.length,u=n==="complex64"?Kl(e):e;if(c>1)for(let p=0;pdT){const v=ql*a;let N=Array.from(e.slice(0,v)),E=Array.from(e.slice((c-ql)*a,c*a));return n==="complex64"&&(N=Kl(N),E=Kl(E)),["["+N.map((D,F)=>jl(D,i[F],n)).join(", ")+", ..., "+E.map((D,F)=>jl(D,i[c-ql+F],n)).join(", ")+"]"]}const T=n==="complex64"?Kl(e):Array.from(e);return["["+T.map((v,N)=>jl(v,i[N],n)).join(", ")+"]"]}const p=t.slice(1),m=s.slice(1),y=s[0]*a,b=[];if(c>dT){for(let T=0;T1)for(let p=0;pdT){const v=ql*a;let N=Array.from(e.slice(0,v)),E=Array.from(e.slice((c-ql)*a,c*a));return n==="complex64"&&(N=Kl(N),E=Kl(E)),["["+N.map((D,F)=>jl(D,i[F],n)).join(", ")+", ..., "+E.map((D,F)=>jl(D,i[c-ql+F],n)).join(", ")+"]"]}const T=n==="complex64"?Kl(e):Array.from(e);return["["+T.map((v,N)=>jl(v,i[N],n)).join(", ")+"]"]}const p=t.slice(1),m=s.slice(1),y=s[0]*a,b=[];if(c>dT){for(let T=0;T`Length of values '${s}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. 
Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||wo(t,this.size),this.strides=Ot(e)}set(e,...t){t.length===0&&(t=[0]),k(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const s of e){if(s<0||s>=this.shape[t]){const i=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(i)}t++}let n=e[e.length-1];for(let s=0;sYl(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=vi().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>Yl(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await vi().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;vi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return Ea.print(this,e)}clone(){return this.throwIfDisposed(),Ea.clone(this)}toString(e=!1){const t=this.dataSync();return ak(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),Ea.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),vi().makeVariable(this,e,t,n)}}Object.defineProperty(Q,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class Xl extends Q{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!ot(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);vi().disposeTensor(this),this.dataId=e.dataId,vi().incRef(this,null)}dispose(){vi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Xl,Symbol.hasInstance,{value:e=>e instanceof Q&&e.assign!=null&&e.assign instanceof Function});(function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"})(r.Rank||(r.Rank={}));var Oy;(function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"})(Oy||(Oy={}));var Ey;(function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"})(Ey||(Ey={}));var Dy;(function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"})(Dy||(Dy={}));var ky;(function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"})(ky||(ky={}));const dk={float32:Dy,int32:Oy,bool:Ey,complex64:ky};function Cn(e,t){if(e==="string"||t==="string"){if(e==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return dk[e][t]}function vd(e){return Cn(e,"int32")}function Bt(e,t){if(e.dtype===t.dtype)return[e,t];const n=Cn(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function fT(e,t){k(e.dtype===t.dtype,()=>`The dtypes of the first(${e.dtype}) and second(${t.dtype}) input must match`)}function Nd(e,t){return t.some(n=>n.id===e.id)}function Zi(e){const t=[],n=new Set;return gT(e,t,n),t}function gT(e,t,n){if(e==null)return;if(e instanceof 
Q){t.push(e);return}if(!pk(e))return;const s=e;for(const i in s){const o=s[i];n.has(o)||(n.add(o),gT(o,t,n))}}function pk(e){return Array.isArray(e)||typeof e=="object"}var mk=Object.freeze({__proto__:null,makeTypesMatch:Bt,assertTypesMatch:fT,isTensorInList:Nd,getTensorsInContainer:Zi});class yT{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class Jl{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new yT}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const e=this.getSortedBackends();for(let t=0;t{t.setupFunc!=null&&t.setupFunc(this.backendInstance)})}disposeRegisteredKernels(e){const t=wd(e);t.forEach(n=>{n.disposeFunc!=null&&n.disposeFunc(this.registry[e])})}initializeBackend(e){const t=this.registryFactory[e];if(t==null)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(n&&!(n instanceof f)&&typeof n.then=="function"){const s=++this.pendingBackendInitId,i=n.then(o=>s(sthis.registryFactory[t].priority-this.registryFactory[e].priority)}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;tthis.startScope(n),()=>this.endScope(s),()=>(s=t(),s instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),s))}scopedRun(e,t,n){e();try{const s=n();return t(),s}catch(s){throw t(),s}}nextTensorId(){return Jl.nextTensorId++}nextVariableId(){return Jl.nextVariableId++}clone(e){const t=this.makeTensorFromDataId(e.dataId,e.shape,e.dtype),n={x:e},s=o=>({x:()=>{const a="float32",c={x:o},u={dtype:a};return V.runKernelFunc(p=>p.cast(o,a),c,null,ul,u)}}),i=[];return this.addTapeNode(this.state.activeScope.name,n,[t],s,i,{}),t}runKernel(e,t,n,s,i){const o=null,a=null;return this.runKernelFunc(o,t,a,e,n,s,i)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let i=0;n.forEach(c=>{i+=c.dtype==="complex64"?3:1});const o=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],a=s-t-i-o;if(a>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${a} data ids) after running '${e}'`)}runKernelFunc(e,t,n,s,i,o,a){let c,u=[];const p=this.isTapeOn();s==null&&(s=this.state.activeScope!=null?this.state.activeScope.name:"");const m=this.state.numBytes,y=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let b;const w=yy(s,this.backendName);let I;if(w!=null)b=()=>{const v=this.backend.numDataIds();I=w.kernelFunc({inputs:t,attrs:i,backend:this.backend});const N=Array.isArray(I)?I:[I];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,v,N);const E=N.map(({dataId:D,shape:F,dtype:_})=>this.makeTensorFromDataId(D,F,_));if(p){let D=this.getTensorsForGradient(s,t,E);if(D==null){a==null&&(a=[]);const F=E.filter((_,B)=>a[B]);D=(o||[]).slice().concat(F)}u=this.saveTensorsForBackwardMode(D)}return E};else{const v=N=>{if(!p)return;u=N.map(E=>this.keep(this.clone(E)))};b=()=>{const 
N=this.backend.numDataIds();I=this.tidy(()=>e(this.backend,v));const E=Array.isArray(I)?I:[I];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,N,E),E}}let T;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?c=b():(T=this.profiler.profileKernel(s,t,()=>b()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(T),c=T.outputs)}),p&&this.addTapeNode(s,t,c,n,u,i),this.state.profiling&&this.state.activeProfile.kernels.push({name:s,bytesAdded:this.state.numBytes-m,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-y,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(t).map(v=>t[v]!=null?t[v].shape:null),outputShapes:c.map(v=>v.shape),kernelTimeMs:T.timeMs,extraInfo:T.extraInfo}),Array.isArray(I)?c:c[0]}saveTensorsForBackwardMode(e){const t=e.map(n=>this.keep(this.clone(n)));return t}getTensorsForGradient(e,t,n){const s=by(e);if(s!=null){const i=s.inputsToSave||[],o=s.outputsToSave||[];let a;s.saveAllInputs?(k(Array.isArray(t),()=>"saveAllInputs is true, expected inputs to be an array."),a=Object.keys(t).map(u=>t[u])):a=i.map(u=>t[u]);const c=n.filter((u,p)=>o[p]);return a.concat(c)}return null}makeTensor(e,t,n,s){if(e==null)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let i=e;n==="string"&&Or(e[0])&&(i=e.map(c=>Cy(c)));const o=s.write(i,t,n),a=new Q(t,n,o,this.nextTensorId());if(this.incRef(a,s),n==="string"){const c=this.state.tensorInfo.get(o),u=aT(i);this.state.numBytes+=u-c.bytes,c.bytes=u}return a}makeTensorFromDataId(e,t,n,s){n=n||"float32";const i=new Q(t,n,e,this.nextTensorId());return this.incRef(i,s),i}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),s!=null&&s!==e.dtype&&(e=e.cast(s));const i=new Xl(e,t,n,this.nextTensorId());if(this.state.registeredVariables[i.name]!=null)throw new Error(`Variable with name ${i.name} was already registered`);return this.state.registeredVariables[i.name]=i,this.incRef(i,this.backend),i}incRef(e,t){const n=this.state.tensorInfo.has(e.dataId)?this.state.tensorInfo.get(e.dataId).refCount:0;if(this.state.numTensors++,e.dtype==="string"&&this.state.numStringTensors++,n===0){this.state.numDataBuffers++;let s=0;e.dtype!=="complex64"&&e.dtype!=="string"&&(s=e.size*Ty(e.dtype)),this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:s,refCount:0}),this.state.numBytes+=s}this.state.tensorInfo.get(e.dataId).refCount++,e instanceof Xl||this.track(e)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;this.state.numTensors--,e.dtype==="string"&&this.state.numStringTensors--;const t=this.state.tensorInfo.get(e.dataId),n=t.refCount;n<=1?(e.dtype!=="complex64"&&(this.state.numBytes-=t.bytes),this.state.numDataBuffers--,t.backend.disposeData(e.dataId),this.state.tensorInfo.delete(e.dataId)):this.state.tensorInfo.get(e.dataId).refCount--}disposeVariables(){for(const e in this.state.registeredVariables){const t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),this.state.registeredVariables[e.name]!=null&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,e.reasons==null&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per 
character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(s=>s.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const s of this.state.activeProfile.kernels)s.kernelTimeMs=await s.kernelTimeMs,s.extraInfo=await s.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(e,t,n,s,i,o){const a={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:i},c=by(e);c!=null&&(s=c.gradFunc),s!=null&&(a.gradient=u=>(u=u.map((p,m)=>{if(p==null){const y=n[m],b=Ra(y.size,y.dtype);return this.makeTensor(b,y.shape,y.dtype)}return p}),s(u.length>1?u:u[0],i,o))),this.state.activeTape.push(a)}keep(e){return e.kept=!0,e}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=Zi(e),n=new Set(t.map(i=>i.id));for(let i=0;i{!i.kept&&i.scopeId===s.id&&this.track(i)})}gradients(e,t,n,s=!1){if(k(t.length>0,()=>"gradients() received an empty list of xs."),n!=null&&n.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const i=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",e));k(i instanceof Q,()=>"The result y returned by f() must be a tensor.");const o=rk(this.state.activeTape,t,i);if(!s&&o.length===0&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const a={};a[i.id]=n==null?fk(i.shape):n,ok(a,o,u=>this.tidy(u),gk);const c=t.map(u=>a[u.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(u=>{for(const p of u.saved)p.dispose()}),this.state.activeTape=null),{value:i,grads:c}})}customGrad(e){return k(Er(e),()=>"The f passed in customGrad(f) must be a function."),(...t)=>{k(t.every(i=>i instanceof Q),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let n;const s={};return t.forEach((i,o)=>{s[o]=i}),this.runKernelFunc((i,o)=>(n=e(...t,o),k(n.value instanceof Q,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),k(Er(n.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),n.value),s,(i,o)=>{const a=n.gradFunc(i,o),c=Array.isArray(a)?a:[a];k(c.length===t.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),k(c.every(p=>p instanceof Q),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const u={};return c.forEach((p,m)=>{u[m]=()=>p}),u})}}readSync(e){const t=this.state.tensorInfo.get(e);return t.backend.readSync(e)}read(e){const t=this.state.tensorInfo.get(e);return t.backend.read(e)}async time(e){const t=qn(),n=await this.backend.time(e);return n.wallMs=qn()-t,n}track(e){return this.state.activeScope!=null&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new yT;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Jl.nextTensorId=0,Jl.nextVariableId=0;function fk(e){const t=Ay(we(e),"float32");return V.makeTensor(t,e,"float32")}function bT(){const e=ne();if(e._tfengine==null){const t=new L(e);e._tfengine=new Jl(t)}return $(e._tfengine.ENV),lk(()=>e._tfengine),e._tfengine}const V=bT();function gk(e,t){const n={a:e,b:t};return V.runKernelFunc((s,i)=>{const o=s.add(e,t);return i([e,t]),o},n,null,xe)}function yk(){return typeof navigator!="undefined"&&navigator!=null}function wT(){if(yk()){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
)|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}return!1}function Fy(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var bk=Object.freeze({__proto__:null,isMobile:wT,isBrowser:Fy});const Qi=C();Qi.registerFlag("DEBUG",()=>!1,e=>{e&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")}),Qi.registerFlag("IS_BROWSER",()=>Fy()),Qi.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined"),Qi.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)),Qi.registerFlag("PROD",()=>!1),Qi.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>Qi.getBool("DEBUG")),Qi.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0),Qi.registerFlag("IS_TEST",()=>!1);function Ni(e,t){let n=e;if(Ln(e))return t==="string"?[]:[e.length];if(!Array.isArray(e))return[];const s=[];for(;Array.isArray(n)||Ln(n)&&t!=="string";)s.push(n.length),n=n[0];return Array.isArray(e)&&C().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&<(e,s,[]),s}function LT(e,t,n){if(n=n||[],!Array.isArray(e)&&!Ln(e)){k(t.length===0,()=>`Element arr[${n.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`);return}k(t.length>0,()=>`Element arr[${n.join("][")}] should be a primitive, but is an array of ${e.length} elements`),k(e.length===t[0],()=>`Element arr[${n.join("][")}] should have ${t[0]} elements, but has ${e.length} elements`);const s=t.slice(1);for(let i=0;i=0&&(i=s),ST(s,i,t,n),e==null||!Ln(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string"){const u=e==null?"null":e.constructor.name;throw new Error(`Argument '${t}' passed to '${n}' must be a Tensor or TensorLike, but got '${u}'`)}const o=Ni(e,i);!Ln(e)&&!Array.isArray(e)&&(e=[e]);const a=!0,c=i!=="string"?Dr(e,i):Ji(e,[],a);return V.makeTensor(c,o,i)}function Zl(e,t,n,s="numeric"){if(!Array.isArray(e))throw new Error(`Argument ${t} passed to ${n} must be a \`Tensor[]\` or \`TensorLike[]\``);const i=e;return i.map((o,a)=>W(o,`${t}[${a}]`,n),s)}const IT="__op";function P(e){const t=Object.keys(e);if(t.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let n=t[0];const s=e[n];n.endsWith("_")&&(n=n.substring(0,n.length-1)),n=n+IT;const i=(...o)=>{V.startScope(n);try{const a=s(...o);return a instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),V.endScope(a),a}catch(a){throw V.endScope(null),a}};return Object.defineProperty(i,"name",{value:n,configurable:!0}),i}function wk(e,t){const n=W(e,"real","complex"),s=W(t,"imag","complex");dt(n.shape,s.shape,`real and imag shapes, ${n.shape} and ${s.shape}, must match in call to tf.complex().`);const i=a=>a.complex(n,s),o={real:n,imag:s};return V.runKernelFunc(i,o,null,Rg)}const Ci=P({complex_:wk});function Fr(e,t,n,s){if(s==null&&(s=Ca(e)),s==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Ln(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(t!=null){Ny(t);const i=we(t),o=we(n);k(i===o,()=>`Based on the provided shape, [${t}], the tensor should have ${i} values but has ${o}`);for(let a=0;a`Error creating a new Tensor. Inferred shape (${n}) does not match the provided shape (${t}). `)}}return!Ln(e)&&!Array.isArray(e)&&(e=[e]),t=t||n,e=s!=="string"?Dr(e,s):Ji(e,[],!0),V.makeTensor(e,t,s)}function en(e,t,n){const s=Ni(e,n);return Fr(e,t,s,n)}const _y={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};const Cd=4;async function Wy(e,t){const n=[],s=[],i=Array.isArray(e)?e.map(a=>a.name):Object.keys(e);for(let a=0;a{const b=await u.bytes(),w=b.reduce((v,N)=>v+N.length,0)+Cd*b.length,I=new Uint8Array(w);let T=0;for(let v=0;v{if(t+=o.byteLength,n.push(o.byteLength===o.buffer.byteLength?o:new o.constructor(o)),!(o instanceof Float32Array||o instanceof Int32Array||o instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${o.constructor.name}`)});const s=new Uint8Array(t);let i=0;return n.forEach(o=>{s.set(new Uint8Array(o.buffer),i),i+=o.byteLength}),s.buffer}const $y=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function xT(e){return $y?Buffer.byteLength(e):new Blob([e]).size}function Sk(e){if($y)return Buffer.from(e).toString("base64");const t=new Uint8Array(e);let n="";for(let s=0,i=t.length;s{t+=i.byteLength});const n=new Uint8Array(t);let s=0;return e.forEach(i=>{n.set(new Uint8Array(i),s),s+=i.byteLength}),n.buffer}function TT(e){const t="/";for(e=e.trim();e.endsWith(t);)e=e.slice(0,e.length-1);const n=e.split(t);return n[n.length-1]}function Ql(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:e.modelTopology==null?0:xT(JSON.stringify(e.modelTopology)),weightSpecsBytes:e.weightSpecs==null?0:xT(JSON.stringify(e.weightSpecs)),weightDataBytes:e.weightData==null?0:e.weightData.byteLength}}function xk(){const e=n=>{let s=n<<13,i=0;for(;(s&8388608)===0;)i-=8388608,s<<=1;return s&=~8388608,i+=947912704,s|i},t=new Uint32Array(2048);t[0]=0;for(let n=1;n<1024;n++)t[n]=e(n);for(let n=1024;n<2048;n++)t[n]=939524096+(n-1024<<13);return t}function Tk(){const e=new Uint32Array(64);e[0]=0,e[31]=1199570944,e[32]=2147483648,e[63]=3347054592;for(let t=1;t<31;t++)e[t]=t<<23;for(let t=33;t<63;t++)e[t]=2147483648+(t-32<<23);return e}function Ak(){const e=new Uint32Array(64);for(let 
t=0;t<64;t++)e[t]=1024;return e[0]=e[32]=0,e}function vk(){const e=xk(),t=Tk(),n=Ak();return s=>{const i=new ArrayBuffer(4*s.length),o=new Uint32Array(i);for(let a=0;a>10]+(c&1023)]+t[c>>10];o[a]=u}return new Float32Array(i)}}class Xt{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return Xt.instance==null&&(Xt.instance=new Xt),Xt.instance}static registerSaveRouter(e){Xt.getInstance().saveRouters.push(e)}static registerLoadRouter(e){Xt.getInstance().loadRouters.push(e)}static getSaveHandlers(e){return Xt.getHandlers(e,"save")}static getLoadHandlers(e,t){return Xt.getHandlers(e,"load",t)}static getHandlers(e,t,n){const s=[],i=t==="load"?Xt.getInstance().loadRouters:Xt.getInstance().saveRouters;return i.forEach(o=>{const a=o(e,n);a!==null&&s.push(a)}),s}}const Nk=e=>Xt.registerSaveRouter(e),Ck=e=>Xt.registerLoadRouter(e),Uy=e=>Xt.getSaveHandlers(e),By=(e,t)=>Xt.getLoadHandlers(e,t);const Ed="tensorflowjs",My=1,Lo="models_store",_r="model_info_store";async function NQ(){const e=Py();return new Promise((t,n)=>{const s=e.deleteDatabase(Ed);s.onsuccess=()=>t(),s.onerror=i=>n(i)})}function Py(){if(!C().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");const e=typeof window=="undefined"?self:window,t=e.indexedDB||e.mozIndexedDB||e.webkitIndexedDB||e.msIndexedDB||e.shimIndexedDB;if(t==null)throw new Error("The current browser does not appear to support IndexedDB.");return t}function zy(e){const t=e.result;t.createObjectStore(Lo,{keyPath:"modelPath"}),t.createObjectStore(_r,{keyPath:"modelPath"})}class So{constructor(e){if(this.indexedDB=Py(),e==null||!e)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=e}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,e)}async load(){return this.databaseAction(this.modelPath)}databaseAction(e,t){return new Promise((n,s)=>{const i=this.indexedDB.open(Ed,My);i.onupgradeneeded=()=>zy(i),i.onsuccess=()=>{const o=i.result;if(t==null){const a=o.transaction(Lo,"readonly"),c=a.objectStore(Lo),u=c.get(this.modelPath);u.onsuccess=()=>{if(u.result==null)return o.close(),s(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));n(u.result.modelArtifacts)},u.onerror=p=>(o.close(),s(u.error)),a.oncomplete=()=>o.close()}else{const a=Ql(t),c=o.transaction(_r,"readwrite");let u=c.objectStore(_r);const p=u.put({modelPath:this.modelPath,modelArtifactsInfo:a});let m;p.onsuccess=()=>{m=o.transaction(Lo,"readwrite");const y=m.objectStore(Lo),b=y.put({modelPath:this.modelPath,modelArtifacts:t,modelArtifactsInfo:a});b.onsuccess=()=>n({modelArtifactsInfo:a}),b.onerror=w=>{u=c.objectStore(_r);const I=u.delete(this.modelPath);I.onsuccess=()=>(o.close(),s(b.error)),I.onerror=T=>(o.close(),s(b.error))}},p.onerror=y=>(o.close(),s(p.error)),c.oncomplete=()=>{m==null?o.close():m.oncomplete=()=>o.close()}}},i.onerror=o=>s(i.error)})}}So.URL_SCHEME="indexeddb://";const AT=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(So.URL_SCHEME))?Rk(e.slice(So.URL_SCHEME.length)):null;Xt.registerSaveRouter(AT),Xt.registerLoadRouter(AT);function Rk(e){return new So(e)}function Ok(e){return e.startsWith(So.URL_SCHEME)?e.slice(So.URL_SCHEME.length):e}class Ek{constructor(){this.indexedDB=Py()}async listModels(){return new Promise((e,t)=>{const 
n=this.indexedDB.open(Ed,My);n.onupgradeneeded=()=>zy(n),n.onsuccess=()=>{const s=n.result,i=s.transaction(_r,"readonly"),o=i.objectStore(_r),a=o.getAll();a.onsuccess=()=>{const c={};for(const u of a.result)c[u.modelPath]=u.modelArtifactsInfo;e(c)},a.onerror=c=>(s.close(),t(a.error)),i.oncomplete=()=>s.close()},n.onerror=s=>t(n.error)})}async removeModel(e){return e=Ok(e),new Promise((t,n)=>{const s=this.indexedDB.open(Ed,My);s.onupgradeneeded=()=>zy(s),s.onsuccess=()=>{const i=s.result,o=i.transaction(_r,"readwrite"),a=o.objectStore(_r),c=a.get(e);let u;c.onsuccess=()=>{if(c.result==null)return i.close(),n(new Error(`Cannot find model with path '${e}' in IndexedDB.`));{const p=a.delete(e),m=()=>{u=i.transaction(Lo,"readwrite");const y=u.objectStore(Lo),b=y.delete(e);b.onsuccess=()=>t(c.result.modelArtifactsInfo),b.onerror=w=>n(c.error)};p.onsuccess=m,p.onerror=y=>(m(),i.close(),n(c.error))}},c.onerror=p=>(i.close(),n(c.error)),o.oncomplete=()=>{u==null?i.close():u.oncomplete=()=>i.close()}},s.onerror=i=>n(s.error)})}}const Ri="/",Io="tensorflowjs_models",vT="info",Dk="model_topology",kk="weight_specs",Fk="weight_data",_k="model_metadata";function CQ(){if(!C().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("purgeLocalStorageModels() cannot proceed because local storage is unavailable in the current environment.");const e=window.localStorage,t=[];for(let n=0;ni.length){e.removeItem(s);const o=CT(s);t.indexOf(o)===-1&&t.push(o)}}return t}function NT(e){return{info:[Io,e,vT].join(Ri),topology:[Io,e,Dk].join(Ri),weightSpecs:[Io,e,kk].join(Ri),weightData:[Io,e,Fk].join(Ri),modelMetadata:[Io,e,_k].join(Ri)}}function CT(e){const t=e.split(Ri);if(t.length<3)throw new Error(`Invalid key format: ${e}`);return t.slice(1,t.length-1).join(Ri)}function Wk(e){return e.startsWith(xo.URL_SCHEME)?e.slice(xo.URL_SCHEME.length):e}class xo{constructor(e){if(!C().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,e==null||!e)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=e,this.keys=NT(this.modelPath)}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{const t=JSON.stringify(e.modelTopology),n=JSON.stringify(e.weightSpecs),s=Ql(e);try{return this.LS.setItem(this.keys.info,JSON.stringify(s)),this.LS.setItem(this.keys.topology,t),this.LS.setItem(this.keys.weightSpecs,n),this.LS.setItem(this.keys.weightData,Sk(e.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata})),{modelArtifactsInfo:s}}catch(i){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${s.modelTopologyBytes}, weightSpecsBytes=${s.weightSpecsBytes}, weightDataBytes=${s.weightDataBytes}.`)}}}async load(){const e=JSON.parse(this.LS.getItem(this.keys.info));if(e==null)throw new Error(`In local storage, there is no model with name 
'${this.modelPath}'`);if(e.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");const t={},n=JSON.parse(this.LS.getItem(this.keys.topology));if(n==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);t.modelTopology=n;const s=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(s==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);t.weightSpecs=s;const i=this.LS.getItem(this.keys.modelMetadata);if(i!=null){const a=JSON.parse(i);t.format=a.format,t.generatedBy=a.generatedBy,t.convertedBy=a.convertedBy,t.userDefinedMetadata=a.userDefinedMetadata}const o=this.LS.getItem(this.keys.weightData);if(o==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return t.weightData=Ik(o),t}}xo.URL_SCHEME="localstorage://";const RT=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(xo.URL_SCHEME))?$k(e.slice(xo.URL_SCHEME.length)):null;Xt.registerSaveRouter(RT),Xt.registerLoadRouter(RT);function $k(e){return new xo(e)}class Uk{constructor(){k(C().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),k(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){const e={},t=Io+Ri,n=Ri+vT;for(let s=0;s"scheme must not be undefined or null."),e.endsWith(Da)&&(e=e.slice(0,e.indexOf(Da))),k(e.length>0,()=>"scheme must not be an empty string.");const n=Ss.getInstance();k(n.managers[e]==null,()=>`A model store manager is already registered for scheme '${e}'.`),n.managers[e]=t}static getManager(e){const t=this.getInstance().managers[e];if(t==null)throw new Error(`Cannot find model manager for scheme '${e}'`);return t}static getSchemes(){return Object.keys(this.getInstance().managers)}}function Dd(e){if(e.indexOf(Da)===-1)throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${Ss.getSchemes().join(",")}`);return{scheme:e.split(Da)[0],path:e.split(Da)[1]}}async function OT(e,t,n=!1){k(e!==t,()=>`Old path and new path are the same: '${e}'`);const s=Xt.getLoadHandlers(e);k(s.length>0,()=>`Copying failed because no load handler is found for source URL ${e}.`),k(s.length<2,()=>`Copying failed because more than one (${s.length}) load handlers for source URL ${e}.`);const i=s[0],o=Xt.getSaveHandlers(t);k(o.length>0,()=>`Copying failed because no save handler is found for destination URL ${t}.`),k(o.length<2,()=>`Copying failed because more than one (${s.length}) save handlers for destination URL ${t}.`);const a=o[0],c=Dd(e).scheme,u=Dd(e).path,p=c===Dd(e).scheme,m=await i.load();n&&p&&await Ss.getManager(c).removeModel(u);const y=await a.save(m);return n&&!p&&await Ss.getManager(c).removeModel(u),y.modelArtifactsInfo}async function Bk(){const e=Ss.getSchemes(),t={};for(const n of e){const s=await Ss.getManager(n).listModels();for(const i in s){const o=n+Da+i;t[o]=s[i]}}return t}async function Mk(e){const t=Dd(e),n=Ss.getManager(t.scheme);return n.removeModel(t.path)}async function Pk(e,t){const n=!1;return OT(e,t,n)}async function zk(e,t){const n=!0;return OT(e,t,n)}class Gk{fetch(e,t){return fetch(e,t)}now(){return performance.now()}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${t}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(e)}decode(e,t){return new TextDecoder(t).decode(e)}}if(C().get("IS_BROWSER")){C().setPlatform("browser",new Gk);try{Ss.registerManager(xo.URL_SCHEME,new Uk)}catch(e){}try{Ss.registerManager(So.URL_SCHEME,new Ek)}catch(e){}}const Vk={importFetch:()=>i2()};let ka;function RQ(){ka=null}function OQ(e){ka=e}function EQ(){return ka}class Hk{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(e,t){return C().global.fetch!=null?C().global.fetch(e,t):(ka==null&&(ka=Vk.importFetch()),ka(e,t))}now(){const e=process.hrtime();return e[0]*1e3+e[1]/1e6}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${t}`);return this.textEncoder.encode(e)}decode(e,t){return e.length===0?"":new this.util.TextDecoder(t).decode(e)}}C().get("IS_NODE")&&C().setPlatform("node",new Hk);function Qe(e,t="float32",n){return t=t||"float32",Ny(e),new kr(e,t,n)}function Yk(e,t){const n=W(e,"x","cast");if(!oT(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if(t==="string"&&n.dtype!=="string"||t!=="string"&&n.dtype==="string")throw new Error("Only strings can be casted to strings");const s={x:n},i={dtype:t};return V.runKernelFunc(o=>o.cast(n,t),s,null,ul,i)}const ve=P({cast_:Yk});function qk(e){const t=W(e,"x","clone",null),n=()=>V.makeTensorFromDataId(t.dataId,t.shape,t.dtype),s={x:t};return V.runKernelFunc(n,s,null,Sl)}const Wr=P({clone_:qk});function ET(e,t=!1){console.log(e.toString(t))}bT();const jk={buffer:Qe,cast:ve,clone:Wr,print:ET};hk(jk);const Kk="model",Xk=".json",Jk=".weights.bin";function DT(e){return new Promise(t=>setTimeout(t)).then(e)}class Fa{constructor(e){if(!C().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");e.startsWith(Fa.URL_SCHEME)&&(e=e.slice(Fa.URL_SCHEME.length)),(e==null||e.length===0)&&(e=Kk),this.modelTopologyFileName=e+Xk,this.weightDataFileName=e+Jk}async save(e){if(typeof document=="undefined")throw new Error("Browser downloads are not supported in 
this environment since `document` is not present");const t=window.URL.createObjectURL(new Blob([e.weightData],{type:"application/octet-stream"}));if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{const n=[{paths:["./"+this.weightDataFileName],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n},i=window.URL.createObjectURL(new Blob([JSON.stringify(s)],{type:"application/json"})),o=this.jsonAnchor==null?document.createElement("a"):this.jsonAnchor;if(o.download=this.modelTopologyFileName,o.href=i,await DT(()=>o.dispatchEvent(new MouseEvent("click"))),e.weightData!=null){const a=this.weightDataAnchor==null?document.createElement("a"):this.weightDataAnchor;a.download=this.weightDataFileName,a.href=t,await DT(()=>a.dispatchEvent(new MouseEvent("click")))}return{modelArtifactsInfo:Ql(e)}}}}Fa.URL_SCHEME="downloads://";class Zk{constructor(e){if(e==null||e.length<1)throw new Error(`When calling browserFiles, at least 1 file is required, but received ${e}`);this.files=e}async load(){const e=this.files[0],t=this.files.slice(1);return new Promise((n,s)=>{const i=new FileReader;i.onload=o=>{const a=JSON.parse(o.target.result),c=a.modelTopology;if(c==null){s(new Error(`modelTopology field is missing from file ${e.name}`));return}t.length===0&&n({modelTopology:c});const u=a.weightsManifest;if(u==null){s(new Error(`weightManifest field is missing from file ${e.name}`));return}let p;try{p=this.checkManifestAndWeightFiles(u,t)}catch(w){s(w);return}const m=[],y=[],b=[];u.forEach(w=>{w.paths.forEach(I=>{y.push(I),b.push(null)}),m.push(...w.weights)}),u.forEach(w=>{w.paths.forEach(I=>{const T=new FileReader;T.onload=v=>{const N=v.target.result,E=y.indexOf(I);b[E]=N,b.indexOf(null)===-1&&n({modelTopology:c,weightSpecs:m,weightData:Od(b),format:a.format,generatedBy:a.generatedBy,convertedBy:a.convertedBy,userDefinedMetadata:a.userDefinedMetadata})},T.onerror=v=>s(`Failed to weights data from file of path '${I}'.`),T.readAsArrayBuffer(p[I])})})},i.onerror=o=>s(`Failed to read model topology and weights manifest JSON from file '${e.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`),i.readAsText(e)})}checkManifestAndWeightFiles(e,t){const n=[],s=t.map(o=>TT(o.name)),i={};for(const o of e)o.paths.forEach(a=>{const c=TT(a);if(n.indexOf(c)!==-1)throw new Error(`Duplicate file basename found in weights manifest: '${c}'`);if(n.push(c),s.indexOf(c)===-1)throw new Error(`Weight file with basename '${c}' is not provided.`);i[a]=t[s.indexOf(c)]});if(n.length!==t.length)throw new Error(`Mismatch in the number of files in weights manifest (${n.length}) and the number of weight files provided (${t.length}).`);return i}}const Qk=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(Fa.URL_SCHEME))?eF(e.slice(Fa.URL_SCHEME.length)):null;Xt.registerSaveRouter(Qk);function eF(e="model"){return new Fa(e)}function tF(e){return new Zk(e)}function kT(e,t,n,s){a(e),n=n==null?0:n,s=s==null?1:s,c(n,s);let i=0;const o=u=>(u.then(p=>{const m=n+ ++i/e.length*(s-n);return t(m),p}),u);function a(u){k(u!=null&&Array.isArray(u)&&u.length>0,()=>"promises must be a none empty array")}function c(u,p){k(u>=0&&u<=1,()=>`Progress fraction must be in range [0, 1], but got startFraction ${u}`),k(p>=0&&p<=1,()=>`Progress fraction must be in range [0, 1], but got endFraction ${p}`),k(p>=u,()=>`startFraction must be no more than endFraction, but got startFraction ${u} and endFraction ${p}`)}return Promise.all(e.map(o))}async function FT(e,t){t==null&&(t={});const n=t.fetchFunc==null?C().platform.fetch:t.fetchFunc,s=e.map(y=>n(y,t.requestInit,{isBinary:!0})),i=0,o=.5,a=t.onProgress==null?await Promise.all(s):await kT(s,t.onProgress,i,o),c=a.map(y=>y.arrayBuffer()),u=.5,p=1,m=t.onProgress==null?await Promise.all(c):await kT(c,t.onProgress,u,p);return m}async function _T(e,t="",n,s){const i=a=>FT(a,{requestInit:s}),o=WT(i);return o(e,t,n)}function WT(e){return async(t,n="",s)=>{const i=t.map(()=>!1),o={},a=s!=null?s.map(()=>!1):[],c=[];if(t.forEach((w,I)=>{let T=0;w.weights.forEach(v=>{const N="quantization"in v?v.quantization.dtype:v.dtype,E=_y[N]*we(v.shape),D=()=>{i[I]=!0,o[I]==null&&(o[I]=[]),o[I].push({manifestEntry:v,groupOffset:T,sizeBytes:E})};s!=null?s.forEach((F,_)=>{F===v.name&&(D(),a[_]=!0)}):D(),c.push(v.name),T+=E})}),!a.every(w=>w)){const w=s.filter((I,T)=>!a[T]);throw new Error(`Could not find weights in manifest with names: ${w.join(", ")}. 
-Manifest JSON has weights with names: ${c.join(", ")}.`)}const u=i.reduce((w,I,T)=>(I&&w.push(T),w),[]),p=[];u.forEach(w=>{t[w].paths.forEach(I=>{const T=n+(n.endsWith("/")?"":"/")+I;p.push(T)})});const m=await e(p),y={};let b=0;return u.forEach(w=>{const I=t[w].paths.length;let T=0;for(let F=0;F{const _=v.slice(F.groupOffset,F.groupOffset+F.sizeBytes),B=Rd(_,[F.manifestEntry]);for(const U in B)y[U]=B[U]}),b+=I}),y}}const nF="application/octet-stream",sF="application/json";class Gy{constructor(e,t){if(this.DEFAULT_METHOD="POST",t==null&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,t.fetchFunc!=null?(k(typeof t.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=t.fetchFunc):this.fetch=C().platform.fetch,k(e!=null&&e.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(e)&&k(e.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`),this.path=e,t.requestInit!=null&&t.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata,weightsManifest:n};t.body.append("model.json",new Blob([JSON.stringify(s)],{type:sF}),"model.json"),e.weightData!=null&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:nF}),"model.weights.bin");const i=await this.fetch(this.path,t);if(i.ok)return{modelArtifactsInfo:Ql(e),responses:[i]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${i.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(b){let w=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?w+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":w+=" Please make sure the server is serving valid JSON for this request.",new Error(w)}const n=t.modelTopology,s=t.weightsManifest,i=t.generatedBy,o=t.convertedBy,a=t.format,c=t.userDefinedMetadata;if(n==null&&s==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let u,p;if(s!=null){const b=await this.loadWeights(s);[u,p]=b}const m={modelTopology:n,weightSpecs:u,weightData:p,userDefinedMetadata:c,generatedBy:i,convertedBy:o,format:a},y=t.modelInitializer;return y&&(m.modelInitializer=y),m}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,s]=iF(t),i=this.weightPathPrefix||n,o=[];for(const p of e)o.push(...p.weights);const a=[],c=[];for(const p of e)for(const m of p.paths)this.weightUrlConverter!=null?c.push(this.weightUrlConverter(m)):a.push(i+m+s);this.weightUrlConverter&&a.push(...await Promise.all(c));const u=await FT(a,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[o,Od(u)]}}Gy.URL_SCHEME_REGEX=/^https?:\/\//;function iF(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?"),s=e.substring(0,t),i=n>t?e.substring(n):"";return[s+"/",i]}function Vy(e){return e.match(Gy.URL_SCHEME_REGEX)!=null}const $T=(e,t)=>{if(typeof fetch=="undefined"&&(t==null||t.fetchFunc==null))return null;{let n=!0;if(Array.isArray(e)?n=e.every(s=>Vy(s)):n=Vy(e),n)return Hy(e,t)}return null};Xt.registerSaveRouter($T),Xt.registerLoadRouter($T);function Hy(e,t){return new Gy(e,t)}function kd(e,t){return Hy(e,t)}class Yy{constructor(e){this.modelArtifacts=e}async load(){return this.modelArtifacts}}class rF{constructor(e){this.saveHandler=e}async save(e){return this.saveHandler(e)}}function oF(e,t,n,s){if(arguments.length===1){const i=e.modelTopology!=null||e.weightSpecs!=null;return i?new Yy(e):(console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Yy({modelTopology:e}))}else return console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Yy({modelTopology:e,weightSpecs:t,weightData:n,trainingConfig:s})}function aF(e){return new rF(e)}var cF=Object.freeze({__proto__:null,browserFiles:tF,browserHTTPRequest:kd,concatenateArrayBuffers:Od,decodeWeights:Rd,encodeWeights:Wy,fromMemory:oF,getLoadHandlers:By,getModelArtifactsInfoForJSON:Ql,getSaveHandlers:Uy,http:Hy,isHTTPScheme:Vy,loadWeights:_T,registerLoadRouter:Ck,registerSaveRouter:Nk,weightsLoaderFactory:WT,withSaveHandler:aF,copyModel:Pk,listModels:Bk,moveModel:zk,removeModel:Mk});function lF(e,t){const n=W(e,"x","reshape",null),s={x:n},i={shape:t},o=(a,c)=>(t=Id(t,n.size),k(n.size===we(t),()=>"new shape and old shape must have the same number of elements."),c([n]),a.reshape(n,t));return V.runKernelFunc(o,s,null,El,i)}const K=P({reshape_:lF});function hF(e,t,n=!1,s=!1){let i=W(e,"a","matMul"),o=W(t,"b","matMul");[i,o]=Bt(i,o),k(i.rank>=2&&o.rank>=2&&i.rank===o.rank,()=>`Error in matMul: inputs must have the same rank of at least 2, got ranks ${i.rank} and ${o.rank}.`);const a=n?i.shape[i.rank-2]:i.shape[i.rank-1],c=s?o.shape[o.rank-1]:o.shape[o.rank-2],u=n?i.shape[i.rank-1]:i.shape[i.rank-2],p=s?o.shape[o.rank-2]:o.shape[o.rank-1],m=i.shape.slice(0,-2),y=o.shape.slice(0,-2),b=we(m),w=we(y);k(ot(m,y),()=>`Error in matMul: outer dimensions (${m}) and (${y}) of Tensors with shapes ${i.shape} and ${o.shape} must match.`),k(a===c,()=>`Error in matMul: inner shapes (${a}) and (${c}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${n} and transposeB=${s} must match.`);const I=i.shape.slice(0,-2).concat([u,p]),T=n?K(i,[b,a,u]):K(i,[b,u,a]),v=s?K(o,[w,p,c]):K(o,[w,c,p]),N=(_,B)=>(B([T,v]),_.batchMatMul(T,v,n,s)),E={a:T,b:v},D={transposeA:n,transposeB:s},F=V.runKernelFunc(N,E,null,vg,D);return K(F,I)}const at=P({matMul_:hF});function uF(e,t,n=1,s=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);const i=W(e,"indices","oneHot","int32"),o=[...i.shape,t],a=(p,m)=>(m([i]),K(p.oneHot(K(i,[i.size]),t,n,s),o)),c={indices:i},u={depth:t,onValue:n,offValue:s};return V.runKernelFunc(a,c,null,Jg,u)}const To=P({oneHot_:uF});function dF(e,t){const n=W(e,"x","transpose");if(t==null&&(t=n.shape.map((o,a)=>a).reverse()),k(n.rank===t.length,()=>`Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`),t.forEach(o=>{k(o>=0&&o`All entries in 'perm' must be between 0 and ${n.rank-1} but got ${t}`)}),n.rank<=1)return n.clone();const s={x:n},i={perm:t};return V.runKernelFunc(o=>o.transpose(n,t),s,null,zl,i)}const Pe=P({transpose_:dF});function pF(e,t,n){const s=W(e,"labels","confusionMatrix"),i=W(t,"predictions","confusionMatrix");k(n==null||n>0&&Number.isInteger(n),()=>`If provided, numClasses must be a positive integer, but got ${n}`),k(s.rank===1,()=>`Expected the rank of labels to be 1, but got ${s.rank}`),k(i.rank===1,()=>`Expected the rank of predictions to be 1, but got ${i.rank}`),k(s.shape[0]===i.shape[0],()=>`Mismatch in the number of examples: ${s.shape[0]} vs. ${i.shape[0]}. 
Labels and predictions should have the same number of elements.`),k(n>0&&Number.isInteger(n),()=>`numClasses is required to be a positive integer, but got ${n}`);const o=To(ve(s,"int32"),n),a=To(ve(i,"int32"),n),c=Pe(o);return ve(at(c,a),"int32")}const mF=P({confusionMatrix_:pF});var fF=Object.freeze({__proto__:null,confusionMatrix:mF});function UT(e,t,n){if(yo(e),t!=null&&t.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const s=Ni(e,n);if(s.length!==3&&s.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return Fr(e,t,s,n)}let _a;function gF(e,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(e==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let n=!1,s=!1,i=!1,o=!1,a=!1;if(e.data instanceof Uint8Array)n=!0;else if(typeof ImageData!="undefined"&&e instanceof ImageData)s=!0;else if(typeof HTMLVideoElement!="undefined"&&e instanceof HTMLVideoElement)i=!0;else if(typeof HTMLImageElement!="undefined"&&e instanceof HTMLImageElement)o=!0;else if(e.getContext!=null)a=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${e.constructor.name}`);if(i){const w=2;if(i&&e.readyState element.")}const c=yy(yd,V.backendName);if(c!=null){const w={pixels:e},I={numChannels:t};return V.runKernel(yd,w,I)}const[u,p]=i?[e.videoWidth,e.videoHeight]:[e.width,e.height];let m;a?m=e.getContext("2d").getImageData(0,0,u,p).data:s||n?m=e.data:(o||i)&&(_a==null&&(_a=document.createElement("canvas").getContext("2d")),_a.canvas.width=u,_a.canvas.height=p,_a.drawImage(e,0,0,u,p),m=_a.getImageData(0,0,u,p).data);let y;if(t===4)y=new Int32Array(m);else{const w=u*p;y=new Int32Array(w*t);for(let I=0;I4||o===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${o}`);if(n.dtype!=="float32"&&n.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${n.dtype}. Please use float32 or int32 tensors.`);const a=await n.data(),c=n.dtype==="float32"?255:1,u=new Uint8ClampedArray(i*s*4);for(let p=0;p1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${w}.`)}else if(n.dtype==="int32"&&(w<0||w>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${w}.`);o===1?(m[0]=w*c,m[1]=w*c,m[2]=w*c):m[b]=w*c}const y=p*4;u[y+0]=Math.round(m[0]),u[y+1]=Math.round(m[1]),u[y+2]=Math.round(m[2]),u[y+3]=Math.round(m[3])}if(t!=null){t.width=i,t.height=s;const p=t.getContext("2d"),m=new ImageData(u,i,s);p.putImageData(m,0,0)}return n!==e&&n.dispose(),u}const BT=P({fromPixels_:gF});var bF=Object.freeze({__proto__:null,toPixels:yF,fromPixels:BT});function Fd(e,t){if(e.rank<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${e.rank}.`);if(t.rank<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(t.dtype!=="int32")throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[t.rank-1]>e.rank)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[t.rank-1]} vs. 
${e.rank}`);if(e.size===0)throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${e.shape}.`);const n=t.shape,s=n[n.length-1];let i=1;for(let p=0;pp/c),1].slice(0,s);return[a,i,c,u]}var wF=Object.freeze({__proto__:null,prepareAndValidate:Fd});function qy(e,t,n){const s=t.rank>1?t.shape[t.rank-1]:1,i=t.rank>1?t.rank-1:1,o=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${n.shape}, indices.shape: ${t.shape}, shape: ${e}, sliceDim: ${s}, and batchDim: ${i}.`;if(n.rank1?t.shape[s-1]:1,o=n.length;let a=1;for(let y=i;y`Error in slice${s}D: Length of begin ${t} must match the rank of the array (${s}).`),k(s===n.length,()=>`Error in slice${s}D: Length of size ${n} must match the rank of the array (${s}).`);for(let i=0;i`Error in slice${s}D: begin[${i}] + size[${i}] (${t[i]+n[i]}) would overflow input.shape[${i}] (${e.shape[i]})`)}function _d(e){const t=[];let n=0;for(;e>0;)e&1&&t.push(n),e/=2,n++;return t}function Wd(e,t,n){const s=[];for(let i=0;i0){const w=t[0],I=n+1;m=VT(a,w,I,s,e),y=HT(c,w,I,i,e),b=MT(o,w,I,e)}else for(let w=0;w-1)o[c]=0;else{const u=PT(t,n,c);let p=s[u];e&1<-1)o[c]=Number.MAX_SAFE_INTEGER;else{const u=PT(t,n,c);let p=s[u];e&1<0?a=Number.MIN_SAFE_INTEGER:a=Number.MAX_SAFE_INTEGER);const u=s[i];return a<0&&(a+=u),a=Hl(0,a,u-1),a}function jT(e,t,n,s,i,o){let a=t[i];const c=n[i]||1;(e&1<0?a=Number.MAX_SAFE_INTEGER:a=Number.MIN_SAFE_INTEGER);const u=s[i];return a<0&&(a+=u),c>0?a=Hl(0,a,u):a=Hl(-1,a,u-1),a}function Xy(e,t,n){let s=n.length;for(let i=0;i1){s=i;break}for(let i=s+1;i0||n[i]!==e[i])return!1;return!0}function Jy(e,t){let n=e.length>0?e[e.length-1]:1;for(let s=0;s{k(a!==-1,()=>"slice() does not support negative begin indexing.")});let o;return n==null?o=new Array(i).fill(-1):typeof n=="number"?o=[n,...new Array(i-1).fill(-1)]:n.lengtha>=0?a:(k(a===-1,()=>`Negative size values should be exactly -1 but got ${a} for the slice() size at index ${c}.`),e.shape[c]-s[c])),[s,o]}var KT=Object.freeze({__proto__:null,assertParamsValid:Ky,maskToAxes:_d,computeOutShape:Wd,stridesWithElidedDims:MT,getNormalizedAxes:GT,startIndicesWithElidedDims:VT,stopIndicesWithElidedDims:HT,stridesForAxis:YT,startForAxis:qT,stopForAxis:jT,isSliceContinous:Xy,computeFlatOffset:Jy,parseSliceParams:$d});class Ao{getClassName(){return this.constructor.className}static fromConfig(e,t){return new e(t)}}class Ws{constructor(){this.classNameMap={}}static getMap(){return Ws.instance==null&&(Ws.instance=new Ws),Ws.instance}static register(e){Ws.getMap().classNameMap[e.className]=[e,e.fromConfig]}}function ge(e){k(e.className!=null,()=>"Class being registered does not have the static className property defined."),k(typeof e.className=="string",()=>"className is required to be a string, but got type "+typeof e.className),k(e.className.length>0,()=>"Class being registered has an empty-string as its className, which is disallowed."),Ws.register(e)}var SF=Object.freeze({__proto__:null,Serializable:Ao,SerializationMap:Ws,registerClass:ge});const IF=.001,XT=.1;function xF(e,t,n){return n==null&&(n=Ud()),Zy(e,t,(s,i)=>eb(s,i,n))}function Ud(){return V.backend.floatPrecision()===32?IF:XT}function Zy(e,t,n){let s=!0;if((Ln(e)||Ln(t))&&(s=!1),Ln(e)&&Ln(t)&&(s=!0),s){const a=e.constructor.name,c=t.constructor.name;if(a!==c)throw new Error(`Arrays are of different type. Actual: ${a}. Expected: ${c}`)}if(Array.isArray(e)&&Array.isArray(t)){const a=Ni(e),c=Ni(t);if(!ot(a,c))throw new Error(`Arrays have different shapes. Actual: [${a}]. 
Expected: [${c}]`)}const i=Ln(e)?e:Ji(e),o=Ln(t)?t:Ji(t);if(i.length!==o.length)throw new Error(`Arrays have different lengths actual: ${i.length} vs expected: ${o.length}. +`;return b[b.length-1]=" "+b[b.length-1]+"]"+(o?"":I),b}function Kl(e){const t=[];for(let n=0;n`Length of values '${s}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||So(t,this.size),this.strides=Ot(e)}set(e,...t){t.length===0&&(t=[0]),k(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const s of e){if(s<0||s>=this.shape[t]){const i=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(i)}t++}let n=e[e.length-1];for(let s=0;sYl(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=vi().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>Yl(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await vi().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;vi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return ka.print(this,e)}clone(){return this.throwIfDisposed(),ka.clone(this)}toString(e=!1){const t=this.dataSync();return ak(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),ka.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),vi().makeVariable(this,e,t,n)}}Object.defineProperty(Q,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class Xl extends Q{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!ot(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);vi().disposeTensor(this),this.dataId=e.dataId,vi().incRef(this,null)}dispose(){vi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Xl,Symbol.hasInstance,{value:e=>e instanceof Q&&e.assign!=null&&e.assign instanceof Function});(function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"})(r.Rank||(r.Rank={}));var Oy;(function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"})(Oy||(Oy={}));var Ey;(function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"})(Ey||(Ey={}));var Dy;(function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"})(Dy||(Dy={}));var ky;(function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"})(ky||(ky={}));const dk={float32:Dy,int32:Oy,bool:Ey,complex64:ky};function Cn(e,t){if(e==="string"||t==="string"){if(e==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return dk[e][t]}function 
vd(e){return Cn(e,"int32")}function Bt(e,t){if(e.dtype===t.dtype)return[e,t];const n=Cn(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function fT(e,t){k(e.dtype===t.dtype,()=>`The dtypes of the first(${e.dtype}) and second(${t.dtype}) input must match`)}function Nd(e,t){return t.some(n=>n.id===e.id)}function Zi(e){const t=[],n=new Set;return gT(e,t,n),t}function gT(e,t,n){if(e==null)return;if(e instanceof Q){t.push(e);return}if(!pk(e))return;const s=e;for(const i in s){const o=s[i];n.has(o)||(n.add(o),gT(o,t,n))}}function pk(e){return Array.isArray(e)||typeof e=="object"}var mk=Object.freeze({__proto__:null,makeTypesMatch:Bt,assertTypesMatch:fT,isTensorInList:Nd,getTensorsInContainer:Zi});class yT{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class Jl{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new yT}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const e=this.getSortedBackends();for(let t=0;t{t.setupFunc!=null&&t.setupFunc(this.backendInstance)})}disposeRegisteredKernels(e){const t=wd(e);t.forEach(n=>{n.disposeFunc!=null&&n.disposeFunc(this.registry[e])})}initializeBackend(e){const t=this.registryFactory[e];if(t==null)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(n&&!(n instanceof f)&&typeof n.then=="function"){const s=++this.pendingBackendInitId,i=n.then(o=>s(sthis.registryFactory[t].priority-this.registryFactory[e].priority)}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;tthis.startScope(n),()=>this.endScope(s),()=>(s=t(),s instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),s))}scopedRun(e,t,n){e();try{const s=n();return t(),s}catch(s){throw t(),s}}nextTensorId(){return Jl.nextTensorId++}nextVariableId(){return Jl.nextVariableId++}clone(e){const t=this.makeTensorFromDataId(e.dataId,e.shape,e.dtype),n={x:e},s=o=>({x:()=>{const a="float32",c={x:o},u={dtype:a};return V.runKernelFunc(p=>p.cast(o,a),c,null,ul,u)}}),i=[];return this.addTapeNode(this.state.activeScope.name,n,[t],s,i,{}),t}runKernel(e,t,n,s,i){const o=null,a=null;return this.runKernelFunc(o,t,a,e,n,s,i)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let i=0;n.forEach(c=>{i+=c.dtype==="complex64"?3:1});const o=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],a=s-t-i-o;if(a>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${a} data ids) after running '${e}'`)}runKernelFunc(e,t,n,s,i,o,a){let c,u=[];const p=this.isTapeOn();s==null&&(s=this.state.activeScope!=null?this.state.activeScope.name:"");const m=this.state.numBytes,y=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let b;const w=yy(s,this.backendName);let I;if(w!=null)b=()=>{const v=this.backend.numDataIds();I=w.kernelFunc({inputs:t,attrs:i,backend:this.backend});const 
N=Array.isArray(I)?I:[I];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,v,N);const E=N.map(({dataId:D,shape:F,dtype:_})=>this.makeTensorFromDataId(D,F,_));if(p){let D=this.getTensorsForGradient(s,t,E);if(D==null){a==null&&(a=[]);const F=E.filter((_,B)=>a[B]);D=(o||[]).slice().concat(F)}u=this.saveTensorsForBackwardMode(D)}return E};else{const v=N=>{if(!p)return;u=N.map(E=>this.keep(this.clone(E)))};b=()=>{const N=this.backend.numDataIds();I=this.tidy(()=>e(this.backend,v));const E=Array.isArray(I)?I:[I];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,N,E),E}}let T;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?c=b():(T=this.profiler.profileKernel(s,t,()=>b()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(T),c=T.outputs)}),p&&this.addTapeNode(s,t,c,n,u,i),this.state.profiling&&this.state.activeProfile.kernels.push({name:s,bytesAdded:this.state.numBytes-m,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-y,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(t).map(v=>t[v]!=null?t[v].shape:null),outputShapes:c.map(v=>v.shape),kernelTimeMs:T.timeMs,extraInfo:T.extraInfo}),Array.isArray(I)?c:c[0]}saveTensorsForBackwardMode(e){const t=e.map(n=>this.keep(this.clone(n)));return t}getTensorsForGradient(e,t,n){const s=by(e);if(s!=null){const i=s.inputsToSave||[],o=s.outputsToSave||[];let a;s.saveAllInputs?(k(Array.isArray(t),()=>"saveAllInputs is true, expected inputs to be an array."),a=Object.keys(t).map(u=>t[u])):a=i.map(u=>t[u]);const c=n.filter((u,p)=>o[p]);return a.concat(c)}return null}makeTensor(e,t,n,s){if(e==null)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let i=e;n==="string"&&Or(e[0])&&(i=e.map(c=>Cy(c)));const o=s.write(i,t,n),a=new Q(t,n,o,this.nextTensorId());if(this.incRef(a,s),n==="string"){const c=this.state.tensorInfo.get(o),u=aT(i);this.state.numBytes+=u-c.bytes,c.bytes=u}return a}makeTensorFromDataId(e,t,n,s){n=n||"float32";const i=new Q(t,n,e,this.nextTensorId());return this.incRef(i,s),i}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),s!=null&&s!==e.dtype&&(e=e.cast(s));const i=new Xl(e,t,n,this.nextTensorId());if(this.state.registeredVariables[i.name]!=null)throw new Error(`Variable with name ${i.name} was already registered`);return this.state.registeredVariables[i.name]=i,this.incRef(i,this.backend),i}incRef(e,t){const n=this.state.tensorInfo.has(e.dataId)?this.state.tensorInfo.get(e.dataId).refCount:0;if(this.state.numTensors++,e.dtype==="string"&&this.state.numStringTensors++,n===0){this.state.numDataBuffers++;let s=0;e.dtype!=="complex64"&&e.dtype!=="string"&&(s=e.size*Ty(e.dtype)),this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:s,refCount:0}),this.state.numBytes+=s}this.state.tensorInfo.get(e.dataId).refCount++,e instanceof Xl||this.track(e)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;this.state.numTensors--,e.dtype==="string"&&this.state.numStringTensors--;const t=this.state.tensorInfo.get(e.dataId),n=t.refCount;n<=1?(e.dtype!=="complex64"&&(this.state.numBytes-=t.bytes),this.state.numDataBuffers--,t.backend.disposeData(e.dataId),this.state.tensorInfo.delete(e.dataId)):this.state.tensorInfo.get(e.dataId).refCount--}disposeVariables(){for(const e in this.state.registeredVariables){const 
t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),this.state.registeredVariables[e.name]!=null&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,e.reasons==null&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(s=>s.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const s of this.state.activeProfile.kernels)s.kernelTimeMs=await s.kernelTimeMs,s.extraInfo=await s.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(e,t,n,s,i,o){const a={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:i},c=by(e);c!=null&&(s=c.gradFunc),s!=null&&(a.gradient=u=>(u=u.map((p,m)=>{if(p==null){const y=n[m],b=Ea(y.size,y.dtype);return this.makeTensor(b,y.shape,y.dtype)}return p}),s(u.length>1?u:u[0],i,o))),this.state.activeTape.push(a)}keep(e){return e.kept=!0,e}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=Zi(e),n=new Set(t.map(i=>i.id));for(let i=0;i{!i.kept&&i.scopeId===s.id&&this.track(i)})}gradients(e,t,n,s=!1){if(k(t.length>0,()=>"gradients() received an empty list of xs."),n!=null&&n.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const i=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",e));k(i instanceof Q,()=>"The result y returned by f() must be a tensor.");const o=rk(this.state.activeTape,t,i);if(!s&&o.length===0&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const a={};a[i.id]=n==null?fk(i.shape):n,ok(a,o,u=>this.tidy(u),gk);const c=t.map(u=>a[u.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(u=>{for(const p of u.saved)p.dispose()}),this.state.activeTape=null),{value:i,grads:c}})}customGrad(e){return k(Er(e),()=>"The f passed in customGrad(f) must be a function."),(...t)=>{k(t.every(i=>i instanceof Q),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let n;const s={};return t.forEach((i,o)=>{s[o]=i}),this.runKernelFunc((i,o)=>(n=e(...t,o),k(n.value instanceof Q,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),k(Er(n.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),n.value),s,(i,o)=>{const a=n.gradFunc(i,o),c=Array.isArray(a)?a:[a];k(c.length===t.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),k(c.every(p=>p instanceof Q),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const u={};return c.forEach((p,m)=>{u[m]=()=>p}),u})}}readSync(e){const t=this.state.tensorInfo.get(e);return t.backend.readSync(e)}read(e){const t=this.state.tensorInfo.get(e);return t.backend.read(e)}async time(e){const t=qn(),n=await this.backend.time(e);return n.wallMs=qn()-t,n}track(e){return this.state.activeScope!=null&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new yT;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Jl.nextTensorId=0,Jl.nextVariableId=0;function fk(e){const t=Ay(we(e),"float32");return V.makeTensor(t,e,"float32")}function bT(){const e=ne();if(e._tfengine==null){const t=new L(e);e._tfengine=new Jl(t)}return $(e._tfengine.ENV),lk(()=>e._tfengine),e._tfengine}const V=bT();function gk(e,t){const n={a:e,b:t};return V.runKernelFunc((s,i)=>{const o=s.add(e,t);return i([e,t]),o},n,null,xe)}function yk(){return typeof navigator!="undefined"&&navigator!=null}function wT(){if(yk()){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
[Minified dist bundle diff: machine-generated output embedding the TensorFlow.js 2.6.0 runtime — environment flag registration (DEBUG, IS_BROWSER, IS_NODE, IS_CHROME, PROD), model IO handlers (IndexedDB, localStorage, browser files/downloads, HTTP), and core tensor ops (matMul, oneHot, transpose, slice, concat, conv1d/2d/3d, batchNorm, avgPool, cumsum, depthToSpace, and related kernels).]
Got axes ${t} and rank-${n} input.`)}function _n(e,t){if(ib(e,t))return null;const n=[];for(let s=0;sn.push(s)),n}function eh(e){return e.map((t,n)=>[n,t]).sort((t,n)=>t[1]-n[1]).map(t=>t[0])}function Is(e,t){const n=[];for(let s=t-e;s{const u=ft(t,s.shape);let p=u;const m=_n(p,s.rank);m!=null&&(s=Pe(s,m),p=Is(p.length,s.rank));const y=c.all(s,p);if(n){const b=En(y.shape,u);return K(y,b)}return y},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Ke,a)}const Pd=P({all_:KF});function XF(e,t=null,n=!1){let s=W(e,"x","any","bool");const i=c=>{const u=ft(t,s.shape);let p=u;const m=_n(p,s.rank);m!=null&&(s=Pe(s,m),p=Is(p.length,s.rank));const y=c.any(s,p);if(n){const b=En(y.shape,u);return K(y,b)}return y},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,wt,a)}const th=P({any_:XF});function JF(e,t=0){let n=W(e,"x","argMax");const s=(a,c)=>{c([n]);let u=ft(t,n.shape);const p=_n(u,n.rank);return p!=null&&(n=Pe(n,p),u=Is(u.length,n.rank)),a.argMax(n,u[0])},i={x:n},o={axis:t};return V.runKernelFunc(s,i,null,$t,o)}const nh=P({argMax_:JF});function ZF(e,t=0){let n=W(e,"x","argMin");const s=(a,c)=>{c([n]),t==null&&(t=0);let u=ft(t,n.shape);const p=_n(u,n.rank);return p!=null&&(n=Pe(n,p),u=Is(u.length,n.rank)),a.argMin(n,u[0])},i={x:n},o={axis:t};return V.runKernelFunc(s,i,null,Kt,o)}const rb=P({argMin_:ZF});function QF(e){const t=W(e,"x","asin"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.asin(t);return i([t]),o},n,null,Fn)}const ob=P({asin_:QF});function e_(e){const t=W(e,"x","asinh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.asinh(t);return i([t]),o},n,null,vn)}const ab=P({asinh_:e_});function t_(e){const t=W(e,"x","atan"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.atan(t);return i([t]),o},n,null,Nn)}const cb=P({atan_:t_});function n_(e,t){let n=W(e,"a","atan2"),s=W(t,"b","atan2");[n,s]=Bt(n,s);const i=(a,c)=>{const u=a.atan2(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ai)}const lb=P({atan2_:n_});function s_(e){const t=W(e,"x","atanh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.atanh(t);return i([t]),o},n,null,Qs)}const hb=P({atanh_:s_});function zd(e,t,n,s,i="NHWC",o){const a=e[3],c=[...t,a],u=rh(i);return Oi(e,c,n,o,s,null,null,u)}function Wn(e,t,n,s,i,o,a="channelsLast"){const[c,u]=Gd(t);let p;if(a==="channelsLast")p=[c,u,e[3],e[3]];else if(a==="channelsFirst")p=[c,u,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return Oi(e,p,n,s,i,o,!1,a)}function sh(e,t,n,s,i,o,a="NDHWC"){const[c,u,p]=db(t);let m,y;if(a==="NDHWC")y="channelsLast",m=[c,u,p,e[4],e[4]];else if(a==="NCDHW")y="channelsFirst",m=[c,u,p,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return ih(e,m,n,s,i,!1,y,o)}function Oi(e,t,n,s,i,o,a=!1,c="channelsLast"){let[u,p,m,y]=[-1,-1,-1,-1];if(c==="channelsLast")[u,p,m,y]=e;else if(c==="channelsFirst")[u,y,p,m]=e;else throw new Error(`Unknown dataFormat ${c}`);const[b,w,,I]=t,[T,v]=Gd(n),[N,E]=Gd(s),D=Ba(b,N),F=Ba(w,E),{padInfo:_,outHeight:B,outWidth:U}=o_(i,p,m,T,v,D,F,o,c),Y=a?I*y:I;let q;return c==="channelsFirst"?q=[u,Y,B,U]:c==="channelsLast"&&(q=[u,B,U,Y]),{batchSize:u,dataFormat:c,inHeight:p,inWidth:m,inChannels:y,outHeight:B,outWidth:U,outChannels:Y,padInfo:_,strideHeight:T,strideWidth:v,filterHeight:b,filterWidth:w,effectiveFilterHeight:D,effectiveFilterWidth:F,dilationHeight:N,dilationWidth:E,inShape:e,outShape:q,filterShape:t}}function ih(e,t,n,s,i,o=!1,a="channelsLast",c){let[u,p,m,y,b]=[-1,-1,-1,-1,-1];if(a==="channelsLast")[u,p,m,y,b]=e;else if(a==="channelsFirst")[u,b,p,m,y]=e;else 
throw new Error(`Unknown dataFormat ${a}`);const[w,I,T,,v]=t,[N,E,D]=db(n),[F,_,B]=db(s),U=Ba(w,F),Y=Ba(I,_),q=Ba(T,B),{padInfo:J,outDepth:oe,outHeight:ce,outWidth:ue}=a_(i,p,m,y,N,E,D,U,Y,q,c),he=o?v*b:v;let pe;return a==="channelsFirst"?pe=[u,he,oe,ce,ue]:a==="channelsLast"&&(pe=[u,oe,ce,ue,he]),{batchSize:u,dataFormat:a,inDepth:p,inHeight:m,inWidth:y,inChannels:b,outDepth:oe,outHeight:ce,outWidth:ue,outChannels:he,padInfo:J,strideDepth:N,strideHeight:E,strideWidth:D,filterDepth:w,filterHeight:I,filterWidth:T,effectiveFilterDepth:U,effectiveFilterHeight:Y,effectiveFilterWidth:q,dilationDepth:F,dilationHeight:_,dilationWidth:B,inShape:e,outShape:pe,filterShape:t}}function i_(e,t,n,s,i){s==null&&(s=ub(e,t,n));const o=e[0],a=e[1],c=Co((o-t+2*s)/n+1,i);k(Ut(c),()=>`The output # of rows (${c}) must be an integer. Change the stride and/or zero pad parameters`);const u=Co((a-t+2*s)/n+1,i);return k(Ut(u),()=>`The output # of columns (${u}) must be an integer. Change the stride and/or zero pad parameters`),[c,u]}function r_(e,t,n,s,i,o){i==null&&(i=ub(e,t,s));const a=e[0],c=e[1],u=e[2],p=Co((a-t+2*i)/s+1,o);k(Ut(p),()=>`The output # of depths (${p}) must be an integer. Change the stride and/or zero pad parameters`);const m=Co((c-t+2*i)/s+1,o);k(Ut(m),()=>`The output # of rows (${m}) must be an integer. Change the stride and/or zero pad parameters`);const y=Co((u-t+2*i)/s+1,o);return k(Ut(y),()=>`The output # of columns (${y}) must be an integer. Change the stride and/or zero pad parameters`),[p,m,y,n]}function ub(e,t,n,s=1){const i=Ba(t,s);return Math.floor((e[0]*(n-1)-n+i)/2)}function Gd(e){return typeof e=="number"?[e,e,e]:e.length===2?[e[0],e[1],1]:e}function db(e){return typeof e=="number"?[e,e,e]:e}function Ba(e,t){return t<=1?e:e+(e-1)*(t-1)}function o_(e,t,n,s,i,o,a,c,u){let p,m,y;if(typeof e=="number"){const b=e===0?"VALID":"NUMBER";p={top:e,bottom:e,left:e,right:e,type:b};const w=i_([t,n],o,s,e,c);m=w[0],y=w[1]}else if(e==="same"){m=Math.ceil(t/s),y=Math.ceil(n/i);const b=Math.max(0,(m-1)*s+o-t),w=Math.max(0,(y-1)*i+a-n),I=Math.floor(b/2),T=b-I,v=Math.floor(w/2),N=w-v;p={top:I,bottom:T,left:v,right:N,type:"SAME"}}else if(e==="valid")p={top:0,bottom:0,left:0,right:0,type:"VALID"},m=Math.ceil((t-o+1)/s),y=Math.ceil((n-a+1)/i);else if(typeof e=="object"){const b=u==="channelsLast"?e[1][0]:e[2][0],w=u==="channelsLast"?e[1][1]:e[2][1],I=u==="channelsLast"?e[2][0]:e[3][0],T=u==="channelsLast"?e[2][1]:e[3][1],v=b===0&&w===0&&I===0&&T===0?"VALID":"EXPLICIT";p={top:b,bottom:w,left:I,right:T,type:v},m=Co((t-o+b+w)/s+1,c),y=Co((n-a+I+T)/i+1,c)}else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:p,outHeight:m,outWidth:y}}function a_(e,t,n,s,i,o,a,c,u,p,m){let y,b,w,I;if(typeof e=="number"){const T=e===0?"VALID":"NUMBER";y={top:e,bottom:e,left:e,right:e,front:e,back:e,type:T};const v=r_([t,n,s,1],c,1,i,e,m);b=v[0],w=v[1],I=v[2]}else if(e==="same"){b=Math.ceil(t/i),w=Math.ceil(n/o),I=Math.ceil(s/a);const T=(b-1)*i+c-t,v=(w-1)*o+u-n,N=(I-1)*a+p-s,E=Math.floor(T/2),D=T-E,F=Math.floor(v/2),_=v-F,B=Math.floor(N/2),U=N-B;y={top:F,bottom:_,left:B,right:U,front:E,back:D,type:"SAME"}}else if(e==="valid")y={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},b=Math.ceil((t-c+1)/i),w=Math.ceil((n-u+1)/o),I=Math.ceil((s-p+1)/a);else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:y,outDepth:b,outHeight:w,outWidth:I}}function Co(e,t){if(!t)return e;switch(t){case"round":return Math.round(e);case"ceil":return Math.ceil(e);case"floor":return Math.floor(e);default:throw new 
Error(`Unknown roundingMode ${t}`)}}function $r(e){const[t,n,s]=Gd(e);return t===1&&n===1&&s===1}function on(e,t){return $r(e)||$r(t)}function rh(e){if(e==="NHWC")return"channelsLast";if(e==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${e}`)}function c_(e,t,n,s,i){const o=W(e,"x","avgPool","float32"),a=1;k(on(n,a),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`);let c=o,u=!1;o.rank===3&&(u=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(c.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${c.rank}.`),i!=null&&k(Ut(s),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const p=(w,I)=>{const T=Wn(c.shape,t,n,1,s,i);return I([c]),T.filterWidth===1&&T.filterHeight===1&&ot(T.inShape,T.outShape)?c.clone():w.avgPool(c,T)},m={x:c},y={filterSize:t,strides:n,pad:s,dimRoundingMode:i};let b=V.runKernelFunc(p,m,null,ei,y);return b=ve(b,o.dtype),u?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const oh=P({avgPool_:c_});function l_(e,t,n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:sn("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","avgPool3d","float32");let u=c,p=!1;c.rank===4&&(p=!0,u=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),k(u.rank===5,()=>`Error in avgPool3d: x must be rank 5 but got rank ${u.rank}.`),k(o==="NDHWC",()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),k(on(n,a),()=>`Error in avgPool3d: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(I,T)=>{a==null&&(a=[1,1,1]);const v=sh(u.shape,t,n,a,s,i,o);return T([u]),I.avgPool3d(u,v)},y={x:u},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a};let w=V.runKernelFunc(m,y,null,hl,b);return w=ve(w,u.dtype),p?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const pb=P({avgPool3d_:l_});function mb(e,t){const n=e[0].length;e.forEach((i,o)=>{k(i.length===n,()=>`Error in concat${n}D: rank of tensors[${o}] must be the same as the rank of the rest (${n})`)}),k(t>=0&&t`Error in concat${n}D: axis must be between 0 and ${n-1}.`);const s=e[0];e.forEach((i,o)=>{for(let a=0;a`Error in concat${n}D: Shape of tensors[${o}] (${i}) does not match the shape of the rest (${s}) along the non-concatenated axis ${o}.`)})}function Ur(e,t){const n=e[0].slice();for(let s=1;s=1,()=>"Pass at least one tensor to concat");let n=Zl(e,"tensors","concat");n[0].dtype==="complex64"&&n.forEach(a=>{if(a.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${a.dtype}. 
`)});const s=(a,c)=>{const u=ft(t,n[0].shape)[0],p=Ur(n.map(b=>b.shape),u);if(we(p)===0)return en([],p);if(n=n.filter(b=>b.size>0),n.length===1)return n[0];const m=n.map(b=>b.shape);mb(m,u);const y=a.concat(n,u);return c(n),y},i=n,o={axis:t};return V.runKernelFunc(s,i,null,td,o)}const Mt=P({concat_:h_});function u_(e){const t=W(e,"x","sigmoid"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sigmoid(t);return i([o]),o},n,null,$l)}const Ei=P({sigmoid_:u_});function d_(e,t,n){const s=W(e,"x","slice");if(s.rank===0)throw new Error("Slicing scalar is not possible");const i=(c,u)=>{const[p,m]=$d(s,t,n);return Ky(s,p,m),u([s]),c.slice(s,p,m)},o={x:s},a={begin:t,size:n};return V.runKernelFunc(i,o,null,pd,a)}const st=P({slice_:d_});function p_(e){const t=W(e,"x","tanh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.tanh(t);return i([o]),o},n,null,Pl)}const Ma=P({tanh_:p_});function m_(e,t,n,s,i,o){const a=W(e,"forgetBias","basicLSTMCell"),c=W(t,"lstmKernel","basicLSTMCell"),u=W(n,"lstmBias","basicLSTMCell"),p=W(s,"data","basicLSTMCell"),m=W(i,"c","basicLSTMCell"),y=W(o,"h","basicLSTMCell"),b=Mt([p,y],1),w=at(b,c),I=be(w,u),T=I.shape[0],v=I.shape[1]/4,N=[T,v],E=st(I,[0,0],N),D=st(I,[0,v],N),F=st(I,[0,v*2],N),_=st(I,[0,v*3],N),B=be(X(Ei(E),Ma(D)),X(m,Ei(be(a,F)))),U=X(Ma(B),Ei(_));return[B,U]}const f_=P({basicLSTMCell_:m_});function g_(e,t,n){const s=W(e,"x","batchToSpaceND"),i=t.reduce((u,p)=>u*p);k(s.rank>=1+t.length,()=>`input rank is ${s.rank} but should be > than blockShape.length ${t.length}`),k(n.length===t.length,()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`),k(s.shape[0]%i===0,()=>`input tensor batch is ${s.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${i}`);const o=u=>u.batchToSpaceND(s,t,n),a={x:s},c={blockShape:t,crops:n};return V.runKernelFunc(o,a,null,Ng,c)}const ah=P({batchToSpaceND_:g_});function y_(e){let t;return e.rank===0||e.rank===1?t=K(e,[1,1,1,e.size]):e.rank===2?t=K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?t=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):t=e,t}function b_(e,t,n,s,i,o){o==null&&(o=.001);const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),u=W(n,"variance","batchNorm");let p;i!=null&&(p=W(i,"scale","batchNorm"));let m;s!=null&&(m=W(s,"offset","batchNorm")),k(c.rank===u.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(m==null||c.rank===m.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(p==null||c.rank===p.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");const y=y_(a),b=(v,N)=>(N([y,c,u,p]),v.batchNorm(y,Vd(c),Vd(u),Vd(m),Vd(p),o)),w={x:y,scale:p,offset:m,mean:c,variance:u},I={varianceEpsilon:o},T=V.runKernelFunc(b,w,null,Ll,I);return K(T,a.shape)}function Vd(e){return e==null?null:e.rank===0?K(e,[e.size]):e.rank===1?e:e.rank===2?K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e}const Ro=P({batchNorm_:b_});function w_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),u=W(n,"variance","batchNorm");let p;i!=null&&(p=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`),k(c.rank===2||c.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${c.rank}.`),k(u.rank===2||u.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank 
${u.rank}.`),p!=null&&k(p.rank===2||p.rank===1,()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${p.rank}.`),m!=null&&k(m.rank===2||m.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${m.rank}.`),Ro(a,c,u,m,p,o)}const nA=P({batchNorm2d_:w_});function L_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),u=W(n,"variance","batchNorm");let p;i!=null&&(p=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===3,()=>`Error in batchNorm3D: x must be rank 3 but got rank ${a.rank}.`),k(c.rank===3||c.rank===1,()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${c.rank}.`),k(u.rank===3||u.rank===1,()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${u.rank}.`),p!=null&&k(p.rank===3||p.rank===1,()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${p.rank}.`),m!=null&&k(m.rank===3||m.rank===1,()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${m.rank}.`),Ro(a,c,u,m,p,o)}const sA=P({batchNorm3d_:L_});function S_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),u=W(n,"variance","batchNorm");let p;i!=null&&(p=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===4,()=>`Error in batchNorm4D: x must be rank 4 but got rank ${a.rank}.`),k(c.rank===4||c.rank===1,()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${c.rank}.`),k(u.rank===4||u.rank===1,()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${u.rank}.`),p!=null&&k(p.rank===4||p.rank===1,()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${p.rank}.`),m!=null&&k(m.rank===4||m.rank===1,()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${m.rank}.`),Ro(a,c,u,m,p,o)}const iA=P({batchNorm4d_:S_});function I_(e,t){let n=W(e,"broadcastTo","x");const s=n.shape;if(t.some(m=>!(m>0)||m%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.lengthn.rank){const m=n.shape.slice();for(;m.length=0;m--)if(i[m]===t[m])o[m]=1;else if(n.shape[m]!==1)throw new Error(`broadcastTo(): [${s}] cannot be broadcast to [${t}].`);const a=o.map((m,y)=>m>1?y:-1).filter(m=>m>=0);if(a.length===0)return Wr(n);const c=m=>m.tile(n,o),u={x:n},p={shape:t,inputShape:i};return V.runKernelFunc(c,u,null,Cg,p)}const ch=P({broadcastTo_:I_});function x_(e){const t=W(e,"x","ceil"),n={x:t};return V.runKernelFunc(s=>s.ceil(t),n,null,dl)}const fb=P({ceil_:x_});function T_(e,t,n){const s=W(e,"x","clipByValue");k(t<=n,()=>`Error in clip: min (${t}) must be less than or equal to max (${n}).`);const i={x:s},o={clipValueMin:t,clipValueMax:n};return V.runKernelFunc((a,c)=>{const u=a.clip(s,t,n);return c([s]),u},i,null,pl,o)}const jn=P({clipByValue_:T_});function A_(e){return Mt(e,0)}const rA=P({concat1d_:A_});function v_(e,t){return Mt(e,t)}const oA=P({concat2d_:v_});function N_(e,t){return Mt(e,t)}const aA=P({concat3d_:N_});function C_(e,t){return Mt(e,t)}const cA=P({concat4d_:C_});function R_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","conv2d"),u=W(t,"filter","conv2d");let p=c,m=!1;c.rank===3&&(m=!0,p=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),k(p.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${p.rank}.`),k(u.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${u.rank}.`),a!=null&&k(Ut(s),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const 
y=i==="NHWC"?p.shape[3]:p.shape[1];k(y===u.shape[2],()=>`Error in conv2d: depth of input (${y}) must match input depth for filter ${u.shape[2]}.`),k(on(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const b=(v,N)=>{const E=rh(i),D=Oi(p.shape,u.shape,n,o,s,a,!1,E),F=v.conv2d(p,u,D);return N([p,u]),F},w={x:p,filter:u},I={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},T=V.runKernelFunc(b,w,null,Og,I);return m?K(T,[T.shape[1],T.shape[2],T.shape[3]]):T}const er=P({conv2d_:R_});function O_(e,t,n,s,i="NWC",o=1,a){const c=W(e,"x","conv1d"),u=W(t,"filter","conv1d");let p=c,m=!1;c.rank===2&&(m=!0,p=K(c,[1,c.shape[0],c.shape[1]])),k(p.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${p.rank}.`),k(u.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${u.rank}.`),a!=null&&k(Ut(s),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),k(p.shape[2]===u.shape[1],()=>`Error in conv1d: depth of input (${p.shape[2]}) must match input depth for filter ${u.shape[1]}.`),k(on(n,o),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${n} and dilation '${o}'`),k(i==="NWC",()=>`Error in conv1d: got dataFormat of ${i} but only NWC is currently supported.`);const y=K(u,[1,u.shape[0],u.shape[1],u.shape[2]]),b=K(p,[p.shape[0],1,p.shape[1],p.shape[2]]),w=[1,n],I=[1,o],T="NHWC",v=er(b,y,w,s,T,I,a);return m?K(v,[v.shape[2],v.shape[3]]):K(v,[v.shape[0],v.shape[2],v.shape[3]])}const Hd=P({conv1d_:O_});function E_(e,t,n,s,i,o="NHWC",a){k(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let c=e,u=t,p=!1;t.rank===3&&(p=!0,u=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]),c=[1,e[0],e[1],e[2]]),k(c.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${c.length}.`),k(u.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${u.rank}`),k(n.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${n.rank}`);const m=o==="NHWC"?c[3]:c[1],y=o==="NHWC"?u.shape[3]:u.shape[1];k(m===n.shape[2],()=>`Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${n.shape[2]}.`),k(y===n.shape[3],()=>`Error in conv2dDerInput: depth of output (${y}) must match output depth for filter ${n.shape[3]}.`),a!=null&&k(Ut(i),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const b=(v,N)=>{const E=1,D=rh(o),F=Oi(c,n.shape,s,E,i,a,!1,D),_=v.conv2dDerInput(u,n,F);return N([u,n]),_},w={dy:u,filter:n},I={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,inputShape:c},T=V.runKernelFunc(b,w,null,Eg,I);return p?K(T,[T.shape[1],T.shape[2],T.shape[3]]):T}const gb=P({conv2DBackpropInput_:E_});function D_(e,t,n,s,i,o){const a=W(e,"x","conv2dTranspose"),c=W(t,"filter","conv2dTranspose");return gb(n,a,c,s,i,"NHWC",o)}const Yd=P({conv2dTranspose_:D_});function k_(e,t,n,s,i="NDHWC",o=[1,1,1]){const a=W(e,"x","conv3d"),c=W(t,"filter","conv3d");let u=a,p=!1;a.rank===4&&(p=!0,u=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]])),k(u.rank===5,()=>`Error in conv3d: input must be rank 5, but got rank ${u.rank}.`),k(c.rank===5,()=>`Error in conv3d: filter must be rank 5, but got rank ${c.rank}.`),k(u.shape[4]===c.shape[3],()=>`Error in conv3d: depth of input (${u.shape[4]}) must match input depth for filter ${c.shape[3]}.`),k(on(n,o),()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${o}'`),k(i==="NDHWC",()=>`Error in conv3d: got dataFormat of ${i} but only NDHWC is currently supported.`);const m=(I,T)=>{const v=ih(u.shape,c.shape,n,o,s),N=I.conv3d(u,c,v);return T([u,c]),N},y={x:u,filter:c},b={strides:n,pad:s,dataFormat:i,dilations:o},w=V.runKernelFunc(m,y,null,Dg,b);return p?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const yb=P({conv3d_:k_});function F_(e,t,n,s,i){k(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let o=e,a=t,c=!1;t.rank===4&&(c=!0,a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),o=[1,e[0],e[1],e[2],e[3]]);const u=o[4],p=a.shape[4];k(o.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${o.length}.`),k(a.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${a.rank}`),k(n.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${n.rank}`),k(u===n.shape[3],()=>`Error in conv3dDerInput: depth of input (${u}) must match input depth for filter ${n.shape[3]}.`),k(p===n.shape[4],()=>`Error in conv3dDerInput: depth of output (${p}) must match output depth for filter ${n.shape[4]}.`);const m=I=>{const T=1,v=ih(o,n.shape,s,T,i);return I.conv3dDerInput(a,n,v)},y={dy:a},b={pad:i},w=V.runKernelFunc(m,y,null,Ex,b);return c?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const lA=P({conv3DBackpropInput_:F_});function __(e,t,n,s,i){const o=W(e,"x","conv3dTranspose"),a=W(t,"filter","conv3dTranspose");return lA(n,o,a,s,i)}const W_=P({conv3dTranspose_:__});function $_(e){const t=W(e,"x","cos"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.cos(t);return i([t]),o},n,null,Ta)}const lh=P({cos_:$_});function U_(e){const t=W(e,"x","cosh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.cosh(t);return i([t]),o},n,null,ml)}const qd=P({cosh_:U_});function B_(e,t=0,n=!1,s=!1){const i=W(e,"x","cumsum"),o=(u,p)=>{const m=_n([t],i.rank);let y=i;m!=null&&(y=Pe(i,m));const b=Is(1,i.rank)[0];let w=u.cumsum(y,b,n,s);if(p([i]),m!=null){const I=eh(m);w=Pe(w,I)}return w},a={x:i},c={axis:t,exclusive:n,reverse:s};return V.runKernelFunc(o,a,null,kg,c)}const jd=P({cumsum_:B_});function M_(e,t,n="NHWC"){const s=W(e,"x","depthToSpace"),i=n==="NHWC"?s.shape[1]:s.shape[2],o=n==="NHWC"?s.shape[2]:s.shape[3],a=n==="NHWC"?s.shape[3]:s.shape[1];k(i*t>=0,()=>`Negative dimension size caused by overflow when multiplying ${i} and ${t} for depthToSpace with input shape ${s.shape}`),k(o*t>=0,()=>`Negative dimension size caused by overflow when multiplying ${o} and ${t} for depthToSpace with input shape - ${s.shape}`),k(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${s.shape}`);const c=m=>m.depthToSpace(s,t,n),u={x:s},p={blockSize:t,dataFormat:n};return V.runKernelFunc(c,u,null,kx,p)}const bb=P({depthToSpace_:M_});function P_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","depthwiseConv2d"),u=W(t,"filter","depthwiseConv2d");let p=c,m=!1;c.rank===3&&(m=!0,p=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),k(p.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${p.rank}.`),k(u.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${u.rank}.`),k(p.shape[3]===u.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${p.shape[3]}) must match the inChannels dimension in filter ${u.shape[2]}.`),a!=null&&k(Ut(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const 
y=(T,v)=>{o==null&&(o=[1,1]),k(rn(n,o),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const N=Oi(p.shape,u.shape,n,o,s,a,!0),E=T.depthwiseConv2D(p,u,N);return v([p,u]),E},b={x:p,filter:u},w={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},I=V.runKernelFunc(y,b,null,Fg,w);return m?K(I,[I.shape[1],I.shape[2],I.shape[3]]):I}const Co=P({depthwiseConv2d_:P_});function z_(e){const t=W(e,"x","diag"),n=i=>{const o=K(t,[t.size]),a=i.diag(o),c=[...e.shape,...e.shape];return K(a,c)},s={x:t};return V.runKernelFunc(n,s,null,Wx)}const G_=P({diag_:z_});function V_(e,t,n,s,i=[1,1],o="NHWC"){const a=W(e,"x","dilation2d"),c=W(t,"filter","dilation2d");k(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),k(c.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${c.rank}.`),k(o==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${o}`);let u=a,p=!1;a.rank===3&&(u=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),p=!0);const m={x:u,filter:c},y={strides:n,pad:s,dilations:i},b=V.runKernel(nd,m,y);return p?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const wb=P({dilation2d_:V_});function Ro(e,t){const n=e.length,s=[];for(let i=0;i1&&a===1&&s.unshift(o)}return s}function on(e,t){const n=[];for(let s=0;s1)&&n.unshift(o)}return n}function nt(e,t){const n=[],s=Math.max(e.length,t.length);for(let i=0;ia.equal(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,Ux)}const ni=P({equal_:H_});function Y_(e,t,n){const s=W(t,"a","where"),i=W(n,"b","where"),o=W(e,"condition","where","bool"),a=nt(s.shape,i.shape),c=ch(s,a),u=ch(i,a);o.rank===1&&k(o.shape[0]===s.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),o.rank!==1&&dt(o.shape,u.shape,"Error in where: ");const p=(y,b)=>{const w=y.select(o,c,u);return b([o]),w},m={condition:o,t:c,e:u};return V.runKernelFunc(p,m,null,oy)}const $n=P({where_:Y_});function q_(e){const t=W(e,"x","zerosLike"),n={x:t};return V.runKernelFunc(s=>s.zerosLike(t),n,null,py)}const et=P({zerosLike_:q_});function j_(e,t){let n=W(e,"a","div"),s=W(t,"b","div");[n,s]=Bt(n,s);const i=_e(n,s),o=et(i),a=ni(s,o);return $n(a,o,i)}const Lb=P({divNoNan_:j_});function K_(e,t){const n=W(e,"t1","dot"),s=W(t,"t2","dot");k((n.rank===1||n.rank===2)&&(s.rank===1||s.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`);const i=n.rank===1?n.size:n.shape[1],o=s.rank===1?s.size:s.shape[0];if(k(i===o,()=>`Error in dot: inner dimensions of inputs must match, but got ${i} and ${o}.`),n.rank===1&&s.rank===1){const a=K(n,[1,-1]),c=K(s,[-1,1]),u=at(a,c);return K(u,[])}else if(n.rank===1&&s.rank===2){const a=K(n,[1,-1]),c=K(s,[s.shape[0],s.shape[1]]),u=at(a,c);return K(u,[u.size])}else if(n.rank===2&&s.rank===1){const a=K(s,[-1,1]),c=at(n,a);return K(c,[c.size])}else{const a=K(s,[s.shape[0],s.shape[1]]),c=at(n,a);return c}}const hA=P({dot_:K_});function X_(e){const t=W(e,"x","elu"),n=(i,o)=>{const a=i.elu(t);return o([a]),a},s={x:t};return V.runKernelFunc(n,s,null,fl)}const Oo=P({elu_:X_});function J_(e){let t=W(e,"x","erf");k(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=ve(t,"float32"));const n={x:t};return V.runKernelFunc((s,i)=>{const o=s.erf(t);return i([t]),o},n,null,gl)}const Sb=P({erf_:J_});function Z_(e){const t=W(e,"x","exp"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.exp(t);return i([o]),o},n,null,yl)}const 
xs=P({exp_:Z_});function Q_(e,t=0){const n=null,s=W(e,"x","expandDims",n);k(t<=s.rank,()=>"Axis must be <= rank of the tensor");const i=s.shape.slice();return t<0&&(k(-(s.rank+1)<=t,()=>`Axis must be in the interval [${-(s.rank+1)}, ${s.rank}]`),t=s.rank+t+1),i.splice(t,0,1),K(s,i)}const Kn=P({expandDims_:Q_});function eW(e){const t=W(e,"x","expm1"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.expm1(t);return i([t]),o},n,null,bl)}const Ib=P({expm1_:eW});function tW(e,t){const n=null,s=W(e,"x","tile",n);k(s.rank===t.length,()=>`Error in transpose: rank of input ${s.rank} must match length of reps ${t}.`);const i=(u,p)=>{const m=u.tile(s,t);return p([s]),m},o=[s],a={x:s},c={reps:t};return V.runKernelFunc(i,a,null,hy,c,o)}const Br=P({tile_:tW});function nW(e,t,n,s="float32"){t==null&&(t=e);const i=Qe([e,t],s),o=e<=t?e:t;for(let c=0;ci.fill(e,t,n),{},null,Bx,s)}function sW(e){const t=W(e,"x","floor"),n={x:t};return V.runKernelFunc(s=>s.floor(t),n,null,wl)}const Ba=P({floor_:sW});const xb=30;function uh(e){return e<=xb?e:Td(e,Math.floor(Math.sqrt(e)))}function iW(e,t){let n=!1,s;for(e<=xb?(s=e,n=!0):s=Td(e,Math.floor(Math.sqrt(e)));!n;)s>t||s===e?n=!0:s=Td(e,s+1);return s}function rW(e,t,n){const s=[],i=e.length;for(let o=0;o{const m=ft(n,s.shape)[0],y=uA(s,i,m),b=u.gather(s,K(i,[i.size]),m);return p([s,i]),K(b,y.outputShape)};return V.runKernelFunc(c,o,null,$g,a)}const Ma=P({gather_:aW});function cW(e,t){let n=W(e,"a","greater"),s=W(t,"b","greater");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.greater(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,Px)}const Ts=P({greater_:cW});function lW(e,t){let n=W(e,"a","greaterEqual"),s=W(t,"b","greaterEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.greaterEqual(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ug)}const tr=P({greaterEqual_:lW});function hW(e){const t=W(e,"input","imag"),n=i=>i.imag(t),s={input:t};return V.runKernelFunc(n,s,null,Mg)}const Pa=P({imag_:hW});function uW(e){const t=W(e,"x","isFinite"),n={x:t};return V.runKernelFunc(s=>s.isFinite(t),n,null,Il)}const dA=P({isFinite_:uW});function dW(e){const t=W(e,"x","isInf"),n={x:t};return V.runKernelFunc(s=>s.isInf(t),n,null,xl)}const pA=P({isInf_:dW});function pW(e){const t=W(e,"x","isNaN"),n={x:t};return V.runKernelFunc(s=>s.isNaN(t),n,null,Tl)}const mA=P({isNaN_:pW});function mW(e,t){let n=W(e,"a","maximum"),s=W(t,"b","maximum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.maximum(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Gg)}const Us=P({maximum_:mW});function Ne(e,t){if((Ln(e)&&t!=="string"||Array.isArray(e))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&Ln(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const n=[],s=[];return Fr(e,n,s,t)}function fW(e,t=.2){const n=W(e,"x","leakyRelu");return Us(X(Ne(t),n),n)}const Xd=P({leakyRelu_:fW});function gW(e,t){let n=W(e,"a","less"),s=W(t,"b","less");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.less(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,zx)}const dh=P({less_:gW});function yW(e,t){let n=W(e,"a","lessEqual"),s=W(t,"b","lessEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.lessEqual(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Gx)}const Mr=P({lessEqual_:yW});function fA(e,t,n){if(n<=0)throw new 
Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return V.runKernelFunc(i=>i.linspace(e,t,n),{},null,Vx,s)}function bW(e,t=5,n=1,s=1,i=.5){const o=W(e,"x","localResponseNormalization");k(o.rank===4||o.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got + ${s.shape}`),k(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${s.shape}`);const c=m=>m.depthToSpace(s,t,n),u={x:s},p={blockSize:t,dataFormat:n};return V.runKernelFunc(c,u,null,kx,p)}const bb=P({depthToSpace_:M_});function P_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","depthwiseConv2d"),u=W(t,"filter","depthwiseConv2d");let p=c,m=!1;c.rank===3&&(m=!0,p=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),k(p.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${p.rank}.`),k(u.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${u.rank}.`),k(p.shape[3]===u.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${p.shape[3]}) must match the inChannels dimension in filter ${u.shape[2]}.`),a!=null&&k(Ut(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const y=(T,v)=>{o==null&&(o=[1,1]),k(on(n,o),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const N=Oi(p.shape,u.shape,n,o,s,a,!0),E=T.depthwiseConv2D(p,u,N);return v([p,u]),E},b={x:p,filter:u},w={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},I=V.runKernelFunc(y,b,null,Fg,w);return m?K(I,[I.shape[1],I.shape[2],I.shape[3]]):I}const Oo=P({depthwiseConv2d_:P_});function z_(e){const t=W(e,"x","diag"),n=i=>{const o=K(t,[t.size]),a=i.diag(o),c=[...e.shape,...e.shape];return K(a,c)},s={x:t};return V.runKernelFunc(n,s,null,Wx)}const G_=P({diag_:z_});function V_(e,t,n,s,i=[1,1],o="NHWC"){const a=W(e,"x","dilation2d"),c=W(t,"filter","dilation2d");k(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),k(c.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${c.rank}.`),k(o==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${o}`);let u=a,p=!1;a.rank===3&&(u=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),p=!0);const m={x:u,filter:c},y={strides:n,pad:s,dilations:i},b=V.runKernel(nd,m,y);return p?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const wb=P({dilation2d_:V_});function Eo(e,t){const n=e.length,s=[];for(let i=0;i1&&a===1&&s.unshift(o)}return s}function an(e,t){const n=[];for(let s=0;s1)&&n.unshift(o)}return n}function nt(e,t){const n=[],s=Math.max(e.length,t.length);for(let i=0;ia.equal(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,Ux)}const ni=P({equal_:H_});function Y_(e,t,n){const s=W(t,"a","where"),i=W(n,"b","where"),o=W(e,"condition","where","bool"),a=nt(s.shape,i.shape),c=ch(s,a),u=ch(i,a);o.rank===1&&k(o.shape[0]===s.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),o.rank!==1&&dt(o.shape,u.shape,"Error in where: ");const p=(y,b)=>{const w=y.select(o,c,u);return b([o]),w},m={condition:o,t:c,e:u};return V.runKernelFunc(p,m,null,oy)}const $n=P({where_:Y_});function q_(e){const t=W(e,"x","zerosLike"),n={x:t};return V.runKernelFunc(s=>s.zerosLike(t),n,null,py)}const et=P({zerosLike_:q_});function j_(e,t){let n=W(e,"a","div"),s=W(t,"b","div");[n,s]=Bt(n,s);const i=_e(n,s),o=et(i),a=ni(s,o);return $n(a,o,i)}const Lb=P({divNoNan_:j_});function K_(e,t){const 
n=W(e,"t1","dot"),s=W(t,"t2","dot");k((n.rank===1||n.rank===2)&&(s.rank===1||s.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`);const i=n.rank===1?n.size:n.shape[1],o=s.rank===1?s.size:s.shape[0];if(k(i===o,()=>`Error in dot: inner dimensions of inputs must match, but got ${i} and ${o}.`),n.rank===1&&s.rank===1){const a=K(n,[1,-1]),c=K(s,[-1,1]),u=at(a,c);return K(u,[])}else if(n.rank===1&&s.rank===2){const a=K(n,[1,-1]),c=K(s,[s.shape[0],s.shape[1]]),u=at(a,c);return K(u,[u.size])}else if(n.rank===2&&s.rank===1){const a=K(s,[-1,1]),c=at(n,a);return K(c,[c.size])}else{const a=K(s,[s.shape[0],s.shape[1]]),c=at(n,a);return c}}const hA=P({dot_:K_});function X_(e){const t=W(e,"x","elu"),n=(i,o)=>{const a=i.elu(t);return o([a]),a},s={x:t};return V.runKernelFunc(n,s,null,fl)}const Do=P({elu_:X_});function J_(e){let t=W(e,"x","erf");k(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=ve(t,"float32"));const n={x:t};return V.runKernelFunc((s,i)=>{const o=s.erf(t);return i([t]),o},n,null,gl)}const Sb=P({erf_:J_});function Z_(e){const t=W(e,"x","exp"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.exp(t);return i([o]),o},n,null,yl)}const xs=P({exp_:Z_});function Q_(e,t=0){const n=null,s=W(e,"x","expandDims",n);k(t<=s.rank,()=>"Axis must be <= rank of the tensor");const i=s.shape.slice();return t<0&&(k(-(s.rank+1)<=t,()=>`Axis must be in the interval [${-(s.rank+1)}, ${s.rank}]`),t=s.rank+t+1),i.splice(t,0,1),K(s,i)}const Kn=P({expandDims_:Q_});function eW(e){const t=W(e,"x","expm1"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.expm1(t);return i([t]),o},n,null,bl)}const Ib=P({expm1_:eW});function tW(e,t){const n=null,s=W(e,"x","tile",n);k(s.rank===t.length,()=>`Error in transpose: rank of input ${s.rank} must match length of reps ${t}.`);const i=(u,p)=>{const m=u.tile(s,t);return p([s]),m},o=[s],a={x:s},c={reps:t};return V.runKernelFunc(i,a,null,hy,c,o)}const Br=P({tile_:tW});function nW(e,t,n,s="float32"){t==null&&(t=e);const i=Qe([e,t],s),o=e<=t?e:t;for(let c=0;ci.fill(e,t,n),{},null,Bx,s)}function sW(e){const t=W(e,"x","floor"),n={x:t};return V.runKernelFunc(s=>s.floor(t),n,null,wl)}const Pa=P({floor_:sW});const xb=30;function uh(e){return e<=xb?e:Td(e,Math.floor(Math.sqrt(e)))}function iW(e,t){let n=!1,s;for(e<=xb?(s=e,n=!0):s=Td(e,Math.floor(Math.sqrt(e)));!n;)s>t||s===e?n=!0:s=Td(e,s+1);return s}function rW(e,t,n){const s=[],i=e.length;for(let o=0;o{const m=ft(n,s.shape)[0],y=uA(s,i,m),b=u.gather(s,K(i,[i.size]),m);return p([s,i]),K(b,y.outputShape)};return V.runKernelFunc(c,o,null,$g,a)}const za=P({gather_:aW});function cW(e,t){let n=W(e,"a","greater"),s=W(t,"b","greater");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.greater(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,Px)}const Ts=P({greater_:cW});function lW(e,t){let n=W(e,"a","greaterEqual"),s=W(t,"b","greaterEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.greaterEqual(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ug)}const tr=P({greaterEqual_:lW});function hW(e){const t=W(e,"input","imag"),n=i=>i.imag(t),s={input:t};return V.runKernelFunc(n,s,null,Mg)}const Ga=P({imag_:hW});function uW(e){const t=W(e,"x","isFinite"),n={x:t};return V.runKernelFunc(s=>s.isFinite(t),n,null,Il)}const dA=P({isFinite_:uW});function dW(e){const t=W(e,"x","isInf"),n={x:t};return V.runKernelFunc(s=>s.isInf(t),n,null,xl)}const pA=P({isInf_:dW});function pW(e){const t=W(e,"x","isNaN"),n={x:t};return 
V.runKernelFunc(s=>s.isNaN(t),n,null,Tl)}const mA=P({isNaN_:pW});function mW(e,t){let n=W(e,"a","maximum"),s=W(t,"b","maximum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.maximum(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Gg)}const Us=P({maximum_:mW});function Ne(e,t){if((Ln(e)&&t!=="string"||Array.isArray(e))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&Ln(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const n=[],s=[];return Fr(e,n,s,t)}function fW(e,t=.2){const n=W(e,"x","leakyRelu");return Us(X(Ne(t),n),n)}const Xd=P({leakyRelu_:fW});function gW(e,t){let n=W(e,"a","less"),s=W(t,"b","less");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.less(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,zx)}const dh=P({less_:gW});function yW(e,t){let n=W(e,"a","lessEqual"),s=W(t,"b","lessEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.lessEqual(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Gx)}const Mr=P({lessEqual_:yW});function fA(e,t,n){if(n<=0)throw new Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return V.runKernelFunc(i=>i.linspace(e,t,n),{},null,Vx,s)}function bW(e,t=5,n=1,s=1,i=.5){const o=W(e,"x","localResponseNormalization");k(o.rank===4||o.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got rank ${o.rank}.`),k(Ut(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let a=o,c=!1;o.rank===3&&(c=!0,a=K(o,[1,o.shape[0],o.shape[1],o.shape[2]]));const u=(b,w)=>{const I=b.localResponseNormalization4D(a,t,n,s,i);return w([a,I]),I},p={x:a},m={depthRadius:t,bias:n,alpha:s,beta:i},y=V.runKernelFunc(u,p,null,zg,m);return c?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const Tb=P({localResponseNormalization_:bW});function wW(e){const t=W(e,"x","log"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.log(t);return i([t]),o},n,null,Al)}const is=P({log_:wW});function LW(e){const t=W(e,"x","log1p"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.log1p(t);return i([t]),o},n,null,vl)}const Jd=P({log1p_:LW});function SW(e){return k(Er(e),()=>"The f passed in grad(f) must be a function"),(t,n)=>{const s=W(t,"x","tf.grad",null),i=n!=null?W(n,"dy","tf.grad"):null;return V.tidy(()=>{const{value:o,grads:a}=V.gradients(()=>e(s),[s],i);return i!=null&&dt(o.shape,i.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),Zd(a),a[0]})}}function IW(e){return k(Er(e),()=>"The f passed in grads(f) must be a function"),(t,n)=>{k(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");const s=Zl(t,"args","tf.grads",null),i=n!=null?W(n,"dy","tf.grads"):null;return V.tidy(()=>{const{value:o,grads:a}=V.gradients(()=>e(...s),s,i);return i!=null&&dt(o.shape,i.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Zd(a),a})}}function xW(e){return k(Er(e),()=>"The f passed in valueAndGrad(f) must be a function"),(t,n)=>{k(t instanceof Q,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),k(n==null||n instanceof Q,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");const{grads:s,value:i}=V.gradients(()=>e(t),[t],n);return Zd(s),{grad:s[0],value:i}}}function TW(e){return 
k(Er(e),()=>"The f passed in valueAndGrads(f) must be a function"),(t,n)=>{k(Array.isArray(t)&&t.every(i=>i instanceof Q),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),k(n==null||n instanceof Q,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");const s=V.gradients(()=>e(...t),t,n);return n!=null&&dt(s.value.shape,n.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Zd(s.grads),s}}function Ab(e,t){k(Er(e),()=>"The f passed in variableGrads(f) must be a function"),k(t==null||Array.isArray(t)&&t.every(p=>p instanceof Xl),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");const n=t!=null;if(!n){t=[];for(const p in V.registeredVariables)t.push(V.registeredVariables[p])}const s=n?t.filter(p=>!p.trainable):null,i=t.length;t=t.filter(p=>p.trainable),k(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${i} variables is trainable.`);const o=!0,{value:a,grads:c}=V.gradients(e,t,null,o);k(c.some(p=>p!=null),()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize()."),k(a.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);const u={};return t.forEach((p,m)=>{c[m]!=null&&(u[p.name]=c[m])}),s!=null&&s.forEach(p=>u[p.name]=null),{value:a,grads:u}}function Di(e){return V.customGrad(e)}function Zd(e){const t=e.filter(n=>n==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that - the f you passed encloses all operations that lead from x to y.`)}function AW(e){const t=W(e,"x","neg"),n={x:t};return V.runKernelFunc(s=>s.neg(t),n,null,jg)}const Pt=P({neg_:AW});function vW(e){const t=W(e,"x","softplus"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.softplus(t);return i([t]),o},n,null,Ul)}const za=P({softplus_:vW});function NW(e){const t=W(e,"x","logSigmoid"),n=Di(s=>{const i=Pt(za(Pt(s))),o=a=>{const c=X(a,Ei(Pt(s)));return c};return{value:i,gradFunc:o}});return n(t)}const gA=P({logSigmoid_:NW});function CW(e,t=null,n=!1){const s=W(e,"x","max"),i=(c,u)=>{const p=ft(t,s.shape);let m=p;const y=_n(m,s.rank);let b=s;y!=null&&(b=Pe(s,y),m=Is(m.length,b.rank));const w=c.max(b,m);y!=null&&b.dispose();let I=w;if(n){const T=En(I.shape,ft(t,s.shape));I=K(I,T),w.dispose()}return u([s,I]),I},o={x:s},a={reductionIndices:t,keepDims:n};return V.runKernelFunc(i,o,null,Nl,a)}const Xn=P({max_:CW});function RW(e,t){let n=W(e,"a","sub"),s=W(t,"b","sub");[n,s]=Bt(n,s);const i=(a,c)=>{const u=a.subtract(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ml)}const Ce=P({sub_:RW});function OW(e,t=null,n=!1){let s=W(e,"x","sum");s.dtype==="bool"&&(s=ve(s,"int32"));const i=(c,u)=>{u([s]);const p=ft(t,s.shape),m=_n(p,s.rank);let y=p,b=s;m!=null&&(b=Pe(s,m),y=Is(y.length,s.rank));let w=c.sum(b,y);if(n){const I=En(w.shape,p);w=K(w,I)}return w},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,ay,a)}const Ue=P({sum_:OW});function EW(e,t=-1){const n=W(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and axis was ${t}`);const s=(a,c)=>{const u=!0,p=Xn(e,t,!0),m=Ce(e,p),y=Ce(ve(m,"float32"),is(Ue(xs(m),t,u)));return c([y]),y},i={logits:n},o={axis:t};return V.runKernelFunc(s,i,null,Pg,o)}const Qd=P({logSoftmax_:EW});function DW(e,t=null,n=!1){const s=W(e,"x","logSumExp"),i=ft(t,s.shape),o=Xn(s,i,!0),a=Ce(s,o),c=xs(a),u=Ue(c,i),p=is(u),m=be(K(o,p.shape),p);if(n){const y=En(m.shape,i);return K(m,y)}return m}const vb=P({logSumExp_:DW});function kW(e,t){const n=W(e,"a","logicalAnd","bool"),s=W(t,"b","logicalAnd","bool");nt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalAnd(n,s),i,null,Hx)}const Bs=P({logicalAnd_:kW});function FW(e){const t=W(e,"x","logicalNot","bool"),n={x:t};return V.runKernelFunc(s=>s.logicalNot(t),n,null,od)}const ph=P({logicalNot_:FW});function _W(e,t){const n=W(e,"a","logicalOr","bool"),s=W(t,"b","logicalOr","bool");nt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalOr(n,s),i,null,Yx)}const ep=P({logicalOr_:_W});function WW(e,t){const n=W(e,"a","logicalXor","bool"),s=W(t,"b","logicalXor","bool");return nt(n.shape,s.shape),Bs(ep(e,t),ph(Bs(e,t)))}const yA=P({logicalXor_:WW});function $W(e,t,n,s,i){const o=W(e,"x","maxPool"),a=1;let c=o,u=!1;o.rank===3&&(u=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(c.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${c.rank}.`),k(rn(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const p=(w,I)=>{const T=Wn(c.shape,t,n,1,s,i);let v;return T.filterWidth===1&&T.filterHeight===1&&ot(T.inShape,T.outShape)?v=c.clone():v=w.maxPool(c,T),I([c,v]),v},m={x:c},y={filterSize:t,strides:n,pad:s,dimRoundingMode:i},b=V.runKernelFunc(p,m,null,Cl,y);return u?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const mh=P({maxPool_:$W});function UW(e,t=[1,1,1],n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:nn("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","maxPool3d");let u=c,p=!1;c.rank===4&&(p=!0,u=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),k(u.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${u.rank}.`),k(o==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),k(rn(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(I,T)=>{a==null&&(a=[1,1,1]);const v=sh(u.shape,t,n,a,s,i,o),N=I.maxPool3d(u,v);return T([u,N]),N},y={x:u},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a},w=V.runKernelFunc(m,y,null,Vg,b);return p?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Nb=P({maxPool3d_:UW});function BW(e,t,n,s,i=!1){const o=W(e,"x","maxPoolWithArgmax"),a={x:o},c={filterSize:t,strides:n,pad:s,includeBatchInIndex:i},u=V.runKernel(cd,a,c);return{result:u[0],indexes:u[1]}}const bA=P({maxPoolWithArgmax_:BW});function ct(e,t="float32"){if(t==="complex64"){const s=ct(e,"float32"),i=ct(e,"float32");return Ci(s,i)}const n=Ra(we(e),t);return V.makeTensor(n,e,t)}function si(e,t="float32"){if(t==="complex64"){const s=si(e,"float32"),i=ct(e,"float32");return Ci(s,i)}const n=Ay(we(e),t);return V.makeTensor(n,e,t)}function MW(e,t=null,n=!1){const s=W(e,"x","mean"),i=ft(t,s.shape),o=On(s.shape,i),a=o[1],c=we(a),u=Di(p=>{const m=Ne(c),y=m.dtype===p.dtype?p:ve(p,m.dtype),b=_e(y,m),w=Ue(b,t,n),I=T=>{const v=p.shape.slice();i.forEach(D=>{v[D]=1});const N=K(T,v),E=_e(X(N,si(p.shape,"float32")),c);return E};return{value:w,gradFunc:I}});return u(s)}const zt=P({mean_:MW});function PW(e,t=null,n=!1){const s=W(e,"x","min"),i=(c,u)=>{const p=ft(t,s.shape);let m=p;const y=_n(m,s.rank);let b=s;y!=null&&(b=Pe(s,y),m=Is(m.length,s.rank));const w=c.min(b,m);y!=null&&b.dispose();let I=w;if(n){const T=En(I.shape,p);I=K(w,T),w.dispose()}return u([s,I]),I},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Hg,a)}const Ga=P({min_:PW});function zW(e,t){let n=W(e,"a","minimum"),s=W(t,"b","minimum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.minimum(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Yg)}const Eo=P({minimum_:zW});function GW(e,t){let n=W(e,"a","mod"),s=W(t,"b","mod");[n,s]=Bt(n,s);const i=(a,c)=>{const u=a.mod(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,qg)}const tp=P({mod_:GW});function VW(e){const t=W(e,"x","square"),n={},s=[t],i=[];return V.runKernelFunc((o,a)=>(a([t]),o.square(t)),{x:t},null,"Square",n,s,i)}const Lt=P({square_:VW});function HW(e,t=null,n=!1){e=W(e,"x","moments");const s=ft(t,e.shape),i=zt(e,s,n);let o=i.shape;n||(o=En(i.shape,s));const a=Lt(Ce(ve(e,"float32"),K(i,o))),c=zt(a,s,n);return{mean:i,variance:c}}const np=P({moments_:HW});function YW(e,t,n,s){const i=W(t,"data","multiRNNCell"),o=Zl(n,"c","multiRNNCell"),a=Zl(s,"h","multiRNNCell");let c=i;const u=[];for(let y=0;y2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);n=n||Math.random();const c=a===1?K(i,[1,-1]):i,u=V.runKernelFunc(p=>p.multinomial(c,s,t,n),{logits2D:c});return a===1?K(u,[u.size]):u}const wA=P({multinomial_:jW});function KW(e,t){let n=W(e,"a","notEqual"),s=W(t,"b","notEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.notEqual(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,ld)}const Pr=P({notEqual_:KW});function XW(e){const t=W(e,"input","real"),n=i=>i.real(t),s={input:t};return V.runKernelFunc(n,s,null,ey)}const Do=P({real_:XW});function JW(e){const t=W(e,"x","onesLike"),n=(i,o)=>{if(t.dtype==="complex64"){const a=Dn(Do(t)),c=et(Pa(t));return Ci(a,c)}return i.onesLike(t)},s={x:t};return V.runKernelFunc(n,s,null,Xg)}const Dn=P({onesLike_:JW});function ZW(e,t){const 
n=W(e,"v1","outerProduct"),s=W(t,"v2","outerProduct");k(n.rank===1&&s.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${n.rank} and ${s.rank}.`);const i=K(n,[-1,1]),o=K(s,[1,-1]);return at(i,o)}const QW=P({outerProduct_:ZW});function e$(e,t,n=0){const s=W(e,"x","pad");if(s.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const i=(c,u)=>(u([s]),c.pad(s,t,n)),o={paddings:t,constantValue:n},a={x:s};return V.runKernelFunc(i,a,null,dd,o)}const ki=P({pad_:e$});function t$(e,t,n=0){return k(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),ki(e,[t],n)}const n$=P({pad1d_:t$});function s$(e,t,n=0){return k(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const i$=P({pad2d_:s$});function r$(e,t,n=0){return k(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const o$=P({pad3d_:r$});function a$(e,t,n=0){return k(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const c$=P({pad4d_:a$});function l$(e,t,n){const s=W(e,"x","spaceToBatchND");k(s.rank>=1+t.length,()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`),k(n.length===t.length,()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`),k(s.shape.reduce((c,u,p)=>p>0&&p<=t.length?c&&(u+n[p-1][0]+n[p-1][1])%t[p-1]===0:c,!0),()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`);const i=c=>c.spaceToBatchND(s,t,n),o={x:s},a={blockShape:t,paddings:n};return V.runKernelFunc(i,o,null,md,a)}const fh=P({spaceToBatchND_:l$});function h$(e,t,n,s,i,o){i==null&&(i=[1,1]),o==null&&(o=1),s===0&&(s="valid");const a=W(e,"x","maxPool");let c=a,u=!1;a.rank===3&&(u=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]])),k(rn(o,i),()=>`Error in pool: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${i}'`);const p=Wn(c.shape,t,o,i,s),m=[p.dilationHeight,p.dilationWidth];let y;s==="same"?y=d$([p.filterHeight,p.filterWidth],m):y=[[0,0],[0,0]];const b=m[0]===1&&m[1]===1,[w,I]=u$([p.inHeight,p.inWidth],m,y),T=b?s:"valid",v=b?c:fh(c,m,w),N=n==="avg"?()=>oh(v,t,o,T):()=>mh(v,t,o,T),E=N(),D=b?E:ah(E,m,I);return u?K(D,[D.shape[1],D.shape[2],D.shape[3]]):D}function u$(e,t,n){const s=n.map(m=>m[0]),i=n.map(m=>m[1]),o=e.concat(s,i),a=t.map((m,y)=>(m-o[y]%m)%m),c=i.map((m,y)=>m+a[y]),u=t.map((m,y)=>[s[y],c[y]]),p=t.map((m,y)=>[0,a[y]]);return[u,p]}function d$(e,t){const n=e.map((a,c)=>a+(a-1)*(t[c]-1)),s=n.map(a=>a-1),i=s.map(a=>Math.floor(a/2)),o=s.map((a,c)=>a-i[c]);return s.map((a,c)=>[i[c],o[c]])}const LA=P({pool_:h$});function p$(e,t){let n=W(e,"base","pow"),s=W(t,"exp","pow");[n,s]=Bt(n,s);const i={a:n,b:s},o=(a,c)=>{const u=a.pow(n,s);return c([n,s,u]),u};return V.runKernelFunc(o,i,null,Zg)}const ii=P({pow_:p$});function m$(e,t){const n=W(e,"x","prelu"),s=W(t,"alpha","prelu"),i=(a,c)=>{const u=a.prelu(n,s);return c([n,s]),u},o={x:n,alpha:s};return V.runKernelFunc(i,o,null,Qg)}const gh=P({prelu_:m$});function f$(e,t=null,n=!1){let s=W(e,"x","prod");const i=c=>{s.dtype==="bool"&&(s=ve(s,"int32"));const u=ft(t,s.shape),p=_n(u,s.rank);let m=u,y=s;p!=null&&(y=Pe(s,p),m=Is(m.length,s.rank));let b=c.prod(y,m);if(n){const w=En(b.shape,u);b=K(b,w)}return b},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Kx,a)}const sp=P({prod_:f$});function g$(e,t,n){const s=we(e);let i=null;if(n==null||n==="float32")i=new Float32Array(s);else if(n==="int32")i=new Int32Array(s);else if(n==="bool")i=new Uint8Array(s);else throw new Error(`Unknown data type ${n}`);for(let o=0;o>>0,b-=u,b*=u,u=b>>>0,b-=u,u+=b*4294967296}return(u>>>0)*23283064365386963e-26};return p}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.alea=a})(Va,e,!1)}),w$=ko(function(e){(function(t,n,s){function i(c){var u=this,p="";u.x=0,u.y=0,u.z=0,u.w=0,u.next=function(){var y=u.x^u.x<<11;return u.x=u.y,u.y=u.z,u.z=u.w,u.w^=u.w>>>19^y^y>>>8},c===(c|0)?u.x=c:p+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor128=a})(Va,e,!1)}),L$=ko(function(e){(function(t,n,s){function i(c){var u=this,p="";u.next=function(){var y=u.x^u.x>>>2;return u.x=u.y,u.y=u.z,u.z=u.w,u.w=u.v,(u.d=u.d+362437|0)+(u.v=u.v^u.v<<4^(y^y<<1))|0},u.x=0,u.y=0,u.z=0,u.w=0,u.v=0,c===(c|0)?u.x=c:p+=c;for(var m=0;m>>4),u.next()}function o(c,u){return u.x=c.x,u.y=c.y,u.z=c.z,u.w=c.w,u.v=c.v,u.d=c.d,u}function a(c,u){var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorwow=a})(Va,e,!1)}),S$=ko(function(e){(function(t,n,s){function i(c){var u=this;u.next=function(){var m=u.x,y=u.i,b,w,I;return b=m[y],b^=b>>>7,w=b^b<<24,b=m[y+1&7],w^=b^b>>>10,b=m[y+3&7],w^=b^b>>>3,b=m[y+4&7],w^=b^b<<7,b=m[y+7&7],b=b^b<<13,w^=b^b<<9,m[y]=w,u.i=y+1&7,w};function p(m,y){var b,w,I=[];if(y===(y|0))w=I[0]=y;else for(y=""+y,b=0;b0;--b)m.next()}p(u,c)}function o(c,u){return u.x=c.x.slice(),u.i=c.i,u}function 
a(c,u){c==null&&(c=+new Date);var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(m.x&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorshift7=a})(Va,e,!1)}),I$=ko(function(e){(function(t,n,s){function i(c){var u=this;u.next=function(){var m=u.w,y=u.X,b=u.i,w,I;return u.w=m=m+1640531527|0,I=y[b+34&127],w=y[b=b+1&127],I^=I<<13,w^=w<<17,I^=I>>>15,w^=w>>>12,I=y[b]=I^w,u.i=b,I+(m^m>>>16)|0};function p(m,y){var b,w,I,T,v,N=[],E=128;for(y===(y|0)?(w=y,y=null):(y=y+"\0",w=0,E=Math.max(E,y.length)),I=0,T=-32;T>>15,w^=w<<4,w^=w>>>13,T>=0&&(v=v+1640531527|0,b=N[T&127]^=w+v,I=b==0?I+1:0);for(I>=128&&(N[(y&&y.length||0)&127]=-1),I=127,T=4*128;T>0;--T)w=N[I+34&127],b=N[I=I+1&127],w^=w<<13,b^=b<<17,w^=w>>>15,b^=b>>>12,N[I]=w^b;m.w=v,m.X=N,m.i=I}p(u,c)}function o(c,u){return u.i=c.i,u.w=c.w,u.X=c.X.slice(),u}function a(c,u){c==null&&(c=+new Date);var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(m.X&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor4096=a})(Va,e,!1)}),x$=ko(function(e){(function(t,n,s){function i(c){var u=this,p="";u.next=function(){var y=u.b,b=u.c,w=u.d,I=u.a;return y=y<<25^y>>>7^b,b=b-w|0,w=w<<24^w>>>8^I,I=I-y|0,u.b=y=y<<20^y>>>12^b,u.c=b=b-w|0,u.d=w<<16^b>>>16^I,u.a=I-y|0},u.a=0,u.b=0,u.c=2654435769|0,u.d=1367130551,c===Math.floor(c)?(u.a=c/4294967296|0,u.b=c|0):p+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.tychei=a})(Va,e,!1)}),Fo=ko(function(e){(function(t,n){var s=this,i=256,o=6,a=52,c="random",u=n.pow(i,o),p=n.pow(2,a),m=p*2,y=i-1,b;function w(F,_,B){var U=[];_=_==!0?{entropy:!0}:_||{};var Y=N(v(_.entropy?[F,D(t)]:F==null?E():F,3),U),q=new I(U),J=function(){for(var oe=q.g(o),ce=u,ue=0;oe=m;)oe/=2,ce/=2,ue>>>=1;return(oe+ue)/ce};return J.int32=function(){return q.g(4)|0},J.quick=function(){return q.g(4)/4294967296},J.double=J,N(D(q.S),t),(_.pass||B||function(oe,ce,ue,he){return he&&(he.S&&T(he,q),oe.state=function(){return T(q,{})}),ue?(n[c]=oe,ce):oe})(J,Y,"global"in _?_.global:this==n,_.state)}n["seed"+c]=w;function I(F){var _,B=F.length,U=this,Y=0,q=U.i=U.j=0,J=U.S=[];for(B||(F=[B++]);Y=1||o===0);const a=Math.sqrt(-2*Math.log(o)/o);e=this.mean+this.stdDev*s*a,t=this.mean+this.stdDev*i*a,(!this.truncated||this.isValidTruncated(e))&&(n=!0)}return(!this.truncated||this.isValidTruncated(t))&&(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return this.dtype==null||this.dtype==="float32"?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class A${constructor(e,t,n,s){this.alpha=e,this.beta=1/t,this.dtype=n;const i=s||Math.random();this.randu=Ha(i.toString()),this.randn=new Cb(0,1,n,!1,this.randu()),e<1?this.d=e+2/3:this.d=e-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let e,t,n,s,i,o;for(;;){do 
s=this.randn.nextValue(),o=1+this.c*s;while(o<=0);if(o*=o*o,e=s*s,t=1-.331*e*e,n=.5*e+this.d*(1-o+Math.log(o)),i=this.randu(),ithis.dtype==null||this.dtype==="float32",this.min=e,this.range=t-e,this.dtype=n,s==null&&(s=Math.random()),typeof s=="number"&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=Ha(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function _Q(e){const t=e.length,n=R$(e),s=C$(e),i=t/6*(Math.pow(n,2)+.25*Math.pow(s-3,2)),o=5.991;if(i>o)throw new Error(`Invalid p-value for JB: ${i}`)}function WQ(e,t,n,s){s==null&&(s=Ud());const i=Rb(e);Qy(i,t,s),Qy(N$(e,i),n,s)}function Rb(e){let t=0;for(let n=0;n{const a=e===t,c=e1;if(a||c||u)return ct([0],s);const p=Math.abs(Math.ceil((t-e)/n)),m=Ra(p,s);t{const o=s.reciprocal(t);return i([t]),o},n,null,Ol)}const Eb=P({reciprocal_:F$});function _$(e){const t=W(e,"x","relu"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu(t)),s={x:t};return V.runKernelFunc(n,s,null,ty)}const Fi=P({relu_:_$});function W$(e){const t=W(e,"x","relu6"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu6(t)),s={x:t};return V.runKernelFunc(n,s,null,iy)}const Db=P({relu6_:W$});function $$(e,t){const n=W(e,"x","reverse"),s=a=>{const c=ft(t,n.shape);if(n.rank===0)return Wr(n);const u=a.reverse(n,c);return K(u,n.shape)},i={x:n},o={dims:t};return V.runKernelFunc(s,i,null,ry,o)}const As=P({reverse_:$$});function U$(e){const t=W(e,"x","reverse");return k(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),As(t,0)}const B$=P({reverse1d_:U$});function M$(e,t){const n=W(e,"x","reverse");return k(n.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${n.rank}.`),As(n,t)}const P$=P({reverse2d_:M$});function z$(e,t){const n=W(e,"x","reverse");return k(n.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${n.rank}.`),As(n,t)}const G$=P({reverse3d_:z$});function V$(e,t){const n=W(e,"x","reverse");return k(n.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${n.rank}.`),As(n,t)}const H$=P({reverse4d_:V$});function Y$(e){const t=W(e,"x","round"),n={x:t};return V.runKernelFunc(s=>s.round(t),n,null,Dl)}const kb=P({round_:Y$});function q$(e){const t=W(e,"x","rsqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.rsqrt(t);return i([t]),o},n,null,kl)}const ip=P({rsqrt_:q$});function j$(e){const t=W(e,"x","selu"),n=(i,o)=>{const a=i.selu(t);return o([t]),a},s={x:t};return V.runKernelFunc(n,s,null,Fl)}const rp=P({selu_:j$});function K$(e,t,n,s,i,o=[1,1],a="NHWC"){const c=W(e,"x","separableConv2d"),u=W(t,"depthwiseFilter","separableConv2d"),p=W(n,"pointwiseFilter","separableConv2d");let m=c,y=!1;if(c.rank===3&&(y=!0,m=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),a==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");k(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),k(u.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${u.rank}.`),k(p.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${u.rank}.`),k(p.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${p.shape[0]}.`),k(p.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${p.shape[1]}.`);const 
b=u.shape[2],w=u.shape[3];k(p.shape[2]===b*w,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*w}, but got ${p.shape[2]}.`);const I=Co(m,u,s,i,a,o),T=1,v=er(I,p,T,"valid",a);return y?K(v,[v.shape[1],v.shape[2],v.shape[3]]):v}const Fb=P({separableConv2d_:K$});async function X$(e,t){const n=W(e,"x","setdiff1d"),s=W(t,"y","setdiff1d");k(n.dtype===s.dtype,()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`),k(n.rank===1,()=>`x should be 1D tensor, but got x (${n.shape}).`),k(s.rank===1,()=>`y should be 1D tensor, but got y (${s.shape}).`);const i=await n.data(),o=await s.data(),a=new Set(o);let c=0;for(let m=0;ms.sign(t),n,null,Wl)}const _b=P({sign_:J$});function Z$(e){const t=W(e,"x","sin"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sin(t);return i([t]),o},n,null,Ta)}const op=P({sin_:Z$});function Q$(e){const t=W(e,"x","sinh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sinh(t);return i([t]),o},n,null,_l)}const ap=P({sinh_:Q$});function eU(e,t,n){const s=W(e,"x","slice1d");return k(s.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`),st(s,[t],[n])}const cp=P({slice1d_:eU});function tU(e,t,n){const s=W(e,"x","slice2d");return k(s.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const Wb=P({slice2d_:tU});function nU(e,t,n){const s=W(e,"x","slice3d");return k(s.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const lp=P({slice3d_:nU});function sU(e,t,n){const s=W(e,"x","slice4d");return k(s.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const bh=P({slice4d_:sU});function iU(e,t=-1){const n=W(e,"logits","softmax","float32");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},i={dim:t};return V.runKernelFunc((o,a)=>{const c=o.softmax(n,t);return a([c]),c},s,null,ly,i)}const Wo=P({softmax_:iU});function rU(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=e.as2D(i,s),a=n.fft(o);return a.reshape(e.shape)},t,null,_g)}const wh=P({fft_:rU});function oU(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=K(e,[i,s]),a=n.ifft(o);return K(a,e.shape)},t,null,Bg)}const Ya=P({ifft_:oU});function aU(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const i=K(e,[n,t]);s=Ya(i)}else{const i=[n,2*(t-1)],o=K(Do(e),[n,t]),a=K(Pa(e),[n,t]),c=As(st(o,[0,1],[n,t-2]),1),u=X(As(st(a,[0,1],[n,t-2]),1),Ne(-1)),p=Mt([o,c],1),m=Mt([a,u],1),y=K(Ci(p,m),[i[0],i[1]]);s=Ya(y)}if(s=Do(s),e.rank===3&&e.shape[0]!==0){const i=s,o=e.shape[0];s=K(s,[o,s.shape[0]/o,s.shape[1]]),i.dispose()}return s}const hp=P({irfft_:aU});function IA(e,t,n=0){let s=[];if(typeof t=="number")k(e.shape[n]%t===0,()=>"Number of splits must evenly divide the axis."),s=new Array(t).fill(e.shape[n]/t);else{const i=t.reduce((a,c)=>(c===-1&&(a+=1),a),0);k(i<=1,()=>"There should be only one negative value in split array.");const o=t.indexOf(-1);if(o!==-1){const a=t.reduce((c,u)=>u>0?c+u:c);t[o]=e.shape[n]-a}k(e.shape[n]===t.reduce((a,c)=>a+c),()=>"The sum of sizes must match the size of the axis dimension."),s=t}return s}function cU(e,t,n=0){const s=W(e,"x","split"),i=(c,u)=>{const p=ft(n,s.shape)[0],m=IA(s,t,p);return c.split(s,m,p)},o={x:s},a={numOrSizeSplits:t,axis:n};return V.runKernelFunc(i,o,null,cy,a)}const os=P({split_:cU});function lU(e,t){k(e.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${e.dtype}`);let n=e.shape[e.shape.length-1];const s=e.size/n;let i;if(t!=null&&t0),T=e.shape.map(v=>v);T[e.shape.length-1]=t,i=st(e,I,T),n=t}else if(t!=null&&t>n){const I=e.shape.map(T=>T);I[e.shape.length-1]=t-n,i=Mt([e,ct(I)],e.shape.length-1),n=t}else i=e;const o=et(i),a=K(Ci(i,o),[s,n]),c=wh(a),u=Math.floor(n/2)+1,p=Do(c),m=Pa(c),y=os(p,[u,n-u],p.shape.length-1),b=os(m,[u,n-u],m.shape.length-1),w=i.shape.slice();return w[i.shape.length-1]=u,K(Ci(y[0],b[0]),w)}const Lh=P({rfft_:lU});function hU(e){const t=W(e,"x","sqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sqrt(t);return i([t]),o},n,null,Bl)}const Sn=P({sqrt_:hU});function uU(e,t){let n=W(e,"a","squaredDifference"),s=W(t,"b","squaredDifference");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(c,u)=>{const p=c.squaredDifference(n,s);return u([n,s]),p},o={a:n,b:s},a={};return V.runKernelFunc(i,o,null,Aa,a)}const Sh=P({squaredDifference_:uU});function dU(e,t){const n=W(e,"x","squeeze");return K(n,Rr(n.shape,t).newShape)}const zr=P({squeeze_:dU});function pU(e,t=0){const n=Zl(e,"tensors","stack");if(k(n.length>=1,()=>"Pass at least one tensor to tf.stack"),n.length===1)return Kn(n[0],t);const s=n[0].rank,i=n[0].shape,o=n[0].dtype;k(t<=s,()=>"Axis must be <= rank of the tensor"),n.forEach(c=>{dt(i,c.shape,"All tensors passed to stack must have matching shapes"),k(o===c.dtype,()=>"All tensors passed to stack must have matching dtypes")});const a=n.map(c=>Kn(c,t));return Mt(a,t)}const as=P({stack_:pU});function mU(e,t=0){const n=W(e,"x","step"),s={x:n},i={alpha:t};return 
V.runKernelFunc(o=>o.step(n,t),s,null,Gl,i)}const qa=P({step_:mU});function fU(e,t,n,s,i=0,o=0,a=0,c=0,u=0){let p=W(e,"x","stridedSlice");const m=w=>{s==null&&(s=new Array(t.length));const I=_d(a);if(I.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(a!==0&&c!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(a!==0&&u!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const T=p.rank-t.length,v=_d(c),N=p.shape.slice();v.forEach(J=>{t[J]=0,n[J]=1,N.splice(J,0,1)}),p=K(p,N);const{begin:E,end:D,strides:F}=GT(p.shape,I,T,t,n,s,i,o,a);t=E,n=D,s=F;const _=_d(u);_.forEach(J=>{n[J]=t[J]+1,s[J]=1});const B=Wd(t,n,s),U=B.filter((J,oe)=>_.indexOf(oe)===-1),Y=s.every(J=>J===1);if(Y)return K(st(p,t,B),U);const q=w.stridedSlice(p,t,n,s);return K(q,U)},y={x:p},b={begin:t,end:n,strides:s,beginMask:i,endMask:o,ellipsisMask:a,newAxisMask:c,shrinkAxisMask:u};return V.runKernelFunc(m,y,null,tT,b)}const $b=P({stridedSlice_:fU});function gU(e){const t=W(e,"x","tan"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.tan(t);return i([t]),o},n,null,va)}const Ub=P({tan_:gU});function Gr(e,t,n){if(yo(e),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const s=Ni(e,n);if(s.length!==2&&s.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Fr(e,t,s,n)}function ja(e,t,n){if(yo(e),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");const s=Ni(e,n);if(s.length!==4&&s.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return Fr(e,t,s,n)}function yU(e,t,n){if(yo(e),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");const s=Ni(e,n);if(s.length!==5&&s.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return Fr(e,t,s,n)}function bU(e,t,n){if(yo(e),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");const s=Ni(e,n);if(s.length!==6&&s.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||s,Fr(e,t,s,n)}function wU(e,t=1,n=!0){const s=W(e,"x","topk");if(s.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");const i=s.shape[s.shape.length-1];if(t>i)throw new Error(`'k' passed to topk() must be <= the last dimension (${i}) but got ${t}`);const o={x:s},a={k:t,sorted:n},[c,u]=V.runKernelFunc(p=>p.topk(s,t,n),o,null,nT,a);return{values:c,indices:u}}const Bb=P({topk_:wU});function LU(e,t=0,n=1,s,i){if(s!=null&&s==="bool")throw new Error("Unsupported data type $ { dtype }");const o=new Cb(t,n,s,!0,i),a=Qe(e,s);for(let c=0;c0,()=>"The input tensor must be at least 1D");const s={x:n},i={axis:t},[o,a]=V.runKernel(gd,s,i);return{values:o,indices:a}}const up=P({unique_:SU});function IU(e,t,n){const s=W(e,"x","unsortedSegmentSum"),i=W(t,"segmentIds","unsortedSegmentSum","int32");k(Ut(n),()=>"numSegments must be of dtype 
int");const o={x:s,segmentIds:i},a={numSegments:n},c=(u,p)=>{const m=u.unsortedSegmentSum(s,i,n);return p([i]),m};return V.runKernelFunc(c,o,null,dy,a)}const Mb=P({unsortedSegmentSum_:IU});function xU(e,t=0){const n=W(e,"x","unstack");k(t>=-n.shape.length&&t`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`),t<0&&(t+=n.shape.length);const s={value:n},i={axis:t},o=a=>a.unstack(n,t);return V.runKernelFunc(o,s,null,uy,i)}const _i=P({unstack_:xU});function xA(e,t=!0,n,s){return V.makeVariable(e,t,n,s)}function dp(e,t){const n=[];for(let o=0;o0,()=>"mask cannot be scalar"),dt(c.slice(o,o+a),i.shape,"mask's shape must match the first K dimensions of tensor's shape,");let u=1;for(let T=o;T"Shape mismatch in v and x");const u=Ne(1),p=Ce(u,c);let m=X(Ce(a,o),p);if(i){k(s!=null,()=>"When using zeroDebias: true, step is required.");const y=W(s,"step","movingAverage");m=_e(m,Ce(u,ii(c,y)))}return be(o,m)}const VU=P({movingAverage_:GU});function HU(e,t,n){const s=W(e,"indices","scatterND","int32"),i=W(t,"updates","scatterND");jy(i,s,n);const o=u=>u.scatterND(s,i,n),a={indices:s,updates:i},c={shape:n};return V.runKernelFunc(o,a,null,Qx,c)}const MA=P({scatterND_:HU});function YU(e,t,n,s){if(e.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const i=e.rank>0?e.shape[0]:1,o=e.rank>1?e.shape[1]:1;if(n.length!==o)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${o}.`);const a=t.size;if(!(t.rank===0||t.rank===1&&a===i))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${i}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function qU(e,t,n,s=0){const i=W(e,"sparseIndices","sparseToDense","int32"),o=W(t,"sparseValues","sparseToDense"),a=W(s,"defaultValue","sparseToDense",o.dtype);YU(i,o,n,a);const c={sparseIndices:i,sparseValues:o,defaultValue:a},u={outputShape:n};return V.runKernelFunc(p=>p.sparseToDense(i,o,n,a),c,null,eT,u)}const zb=P({sparseToDense_:qU});function jU(e,t){const n=W(t,"indices","gatherND","int32"),s=W(e,"x","gatherND"),i=a=>a.gatherND(s,n),o={params:s,indices:n};return V.runKernelFunc(i,o,null,Mx)}const PA=P({gatherND_:jU});function KU(e,t){if(t==null)return e.shape.slice();if(ot(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s`x has to be a floating point tensor since it's going to be scaled, but got a ${i.dtype} tensor instead.`),k(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return e instanceof Q?i.clone():i;const o=KU(i,n),a=1-t,c=_e(Ba(be(_o(o,0,1,"float32",s),a)),a);return X(i,c)}const zA=P({dropout_:XU});function GA(e){return Math.floor(Math.pow(2,Math.ceil(Math.log(e)/Math.log(2))))}function Gb(e,t,n){const s=1-e%2,i=new Float32Array(e);for(let o=0;o1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${s.rank}`),k(s.rank-1===i.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${s.rank} and targets rank ${i.rank}`),dt(s.shape.slice(0,s.shape.length-1),i.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");const o=s.shape[s.shape.length-1];k(n>0&&n<=o,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${o}), but got ${n}`);const a=await s.data(),c=await 
i.data(),[u,p]=[a.length/o,o],m=wn("bool",u);for(let y=0;yv.value-T.value),m[y]=0;for(let T=0;T`Error in conv2dDerFilter: input must be rank 4, but got shape ${c.shape}.`),k(u.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${u.shape}.`),k(n.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`);const p=o==="NHWC"?c.shape[3]:c.shape[1],m=o==="NHWC"?u.shape[3]:u.shape[1];k(p===n[2],()=>`Error in conv2dDerFilter: depth of input ${p}) must match input depth in filter (${n[2]}.`),k(m===n[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${n[3]}).`),a!=null&&k(Ut(i),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const y=I=>{const T=1,v=rh(o),N=Oi(c.shape,n,s,T,i,a,!1,v);return I.conv2dDerFilter(c,u,N)},b={x:c,dy:u},w={strides:s,pad:i,dataFormat:o,dimRoundingMode:a};return V.runKernelFunc(y,b,null,Rx,w)}const Vb=P({conv2DBackpropFilter_:QU});function mp(e,t,n){if(n==null||n==="linear")return e;if(n==="relu")return X(e,qa(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function fp(e,t){let n=t;const s=on(e.shape,t.shape);return s.length>0&&(n=Ue(n,s)),K(n,e.shape)}function gp(e,t,n){if(t==="linear")return e;if(t==="relu")return Fi(e);if(t==="elu")return Oo(e);if(t==="relu6")return Db(e);if(t==="prelu")return gh(e,n);throw new Error(`Unknown fused activation ${t}.`)}const yp=(e,t)=>{const n=e>0;return!n||t==="linear"};function eB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:u="linear",preluActivationWeights:p}){if(u=u||"linear",yp(V.state.gradientDepth,u)===!1){let _=er(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),gp(_,u,p)}const m=W(e,"x","conv2d"),y=W(t,"filter","conv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${y.rank}.`),a!=null&&k(Ut(s),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),k(b.shape[3]===y.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${y.shape[2]}.`),k(rn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),k(i==="NHWC",()=>`Error in conv2d: got dataFormat of ${i} but only NHWC is currently supported.`);const I=Oi(b.shape,y.shape,n,o,s,a);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),nt(I.outShape,T.shape));let v;p!=null&&(v=W(p,"prelu weights","fused conv2d"));const N=(_,B)=>{const[U,Y,q,J]=B,oe=mp(_,q,u);k($r(o),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${o}'`);const ce=gb(Y.shape,oe,U,n,s),ue=Vb(Y,oe,U.shape,n,s),he=[ce,ue];if(J!=null){const pe=fp(J,oe);he.push(pe)}return he},E=_=>{const B=_.fusedConv2d({input:b,filter:y,convInfo:I,bias:T,activation:u,preluActivationWeights:v});return B},D={x:b,filter:y,bias:T,preluActivationWeights:v},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:u};if(c==null){const _=Di((B,U,Y)=>{let q=V.runKernelFunc(E,D,null,fy,F);return Y([U,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Di((B,U,Y,q)=>{let J=V.runKernelFunc(E,D,null,fy,F);return q([U,B,J,Y]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const Hb=P({fusedConv2d_:eB});function tB(e,t,n,s){let i=e;e.rank===3&&(i=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let o=t;o.rank===3&&(o=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=u=>u.depthwiseConv2DDerFilter(i,o,s),c={x:i,dy:o};return V.runKernelFunc(a,c,null,Fx)}const VA=P({depthwiseConv2dNativeBackpropFilter_:tB});function nB(e,t,n,s){let i=t,o=!1;t.rank===3&&(o=!0,i=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=p=>p.depthwiseConv2DDerInput(i,n,s),c={dy:i},u=V.runKernelFunc(a,c,null,_x);return o?K(u,[u.shape[1],u.shape[2],u.shape[3]]):u}const HA=P({depthwiseConv2dNativeBackpropInput_:nB});function sB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:u="linear",preluActivationWeights:p}){if(yp(V.state.gradientDepth,u)===!1){let _=Co(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),gp(_,u,p)}const m=W(e,"x","depthwiseConv2d"),y=W(t,"filter","depthwiseConv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${y.rank}.`),k(b.shape[3]===y.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${y.shape[2]}.`),o==null&&(o=[1,1]),k(rn(n,o),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),a!=null&&k(Ut(s),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${a} but got pad ${s}.`);const I=Oi(b.shape,y.shape,n,o,s,a,!0);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),nt(I.outShape,T.shape));let v;p!=null&&(v=W(p,"prelu weights","fused depthwiseConv2d"));const N=(_,B)=>{k($r(o),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`);const[U,Y,q,J]=B,oe=mp(_,q,u),ce=HA(Y.shape,oe,U,I),ue=VA(Y,oe,U.shape,I);if(J!=null){const he=fp(T,oe);return[ce,ue,he]}return[ce,ue]},E=_=>{const B=_.fusedDepthwiseConv2D({input:b,filter:y,convInfo:I,bias:T,activation:u,preluActivationWeights:v});return B},D={x:b,filter:y,bias:T,preluActivationWeights:v},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:u};if(c==null){const _=Di((B,U,Y)=>{let q=V.runKernelFunc(E,D,null,gy,F);return Y([U,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Di((B,U,Y,q)=>{let J=V.runKernelFunc(E,D,null,gy,F);return q([U,B,J,Y]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const YA=P({fusedDepthwiseConv2d_:sB});function iB({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:i,activation:o="linear",preluActivationWeights:a}){if(yp(V.state.gradientDepth,o)===!1){let J=at(e,t,n,s);return i!=null&&(J=be(J,i)),gp(J,o,a)}let c=W(e,"a","fused matMul"),u=W(t,"b","fused matMul");[c,u]=Bt(c,u);const p=n?c.shape[c.rank-2]:c.shape[c.rank-1],m=s?u.shape[u.rank-1]:u.shape[u.rank-2],y=n?c.shape[c.rank-1]:c.shape[c.rank-2],b=s?u.shape[u.rank-2]:u.shape[u.rank-1],w=c.shape.slice(0,-2),I=u.shape.slice(0,-2),T=we(w),v=we(I);k(c.rank>=2&&u.rank>=2&&c.rank===u.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${c.rank} and ${u.rank}.`),k(ot(w,I),()=>`Error in fused matMul: outer dimensions (${w}) and (${I}) of Tensors with shapes ${c.shape} and ${u.shape} must match.`),k(p===m,()=>`Error in fused matMul: inner shapes (${p}) and (${m}) of Tensors with shapes ${c.shape} and ${u.shape} and transposeA=${n} and transposeB=${s} must match.`);const N=c.shape.slice(0,-2).concat([y,b]),E=n?K(c,[T,p,y]):K(c,[T,y,p]),D=s?K(u,[v,b,m]):K(u,[v,m,b]);let F;i!=null&&(F=W(i,"bias","fused matMul"),[F]=Bt(F,c),nt(N,F.shape));let _;a!=null&&(_=W(a,"prelu weights","fused matMul"));const B=(J,oe)=>{const[ce,ue,he,pe]=oe,le=mp(K(J,he.shape),he,o);let ye,me;if(!n&&!s?(ye=at(le,ue,!1,!0),me=at(ce,le,!0,!1)):!n&&s?(ye=at(le,ue,!1,!1),me=at(le,ce,!0,!1)):n&&!s?(ye=at(ue,le,!1,!0),me=at(ce,le,!1,!1)):(ye=at(ue,le,!0,!0),me=at(le,ce,!0,!0)),i!=null){const Ie=fp(pe,le);return[ye,me,Ie]}else return[ye,me]},U=J=>{const oe=J.fusedBatchMatMul({a:E,b:D,transposeA:n,transposeB:s,bias:F,activation:o,preluActivationWeights:_});return oe},Y={a:E,b:D,bias:F,preluActivationWeights:_},q={transposeA:n,transposeB:s,activation:o};if(i==null){const J=Di((oe,ce,ue)=>{const he=V.runKernelFunc(U,Y,null,my,q);return ue([oe,ce,he]),{value:K(he,N),gradFunc:B}});return J(E,D)}else{const J=Di((oe,ce,ue,he)=>{const pe=V.runKernelFunc(U,Y,null,my,q);return he([oe,ce,pe,ue]),{value:K(pe,N),gradFunc:B}});return J(E,D,F)}}const bp=P({fusedMatMul_:iB});var rB=Object.freeze({__proto__:null,conv2d:Hb,depthwiseConv2d:YA,matMul:bp});function oB(e){return Gb(e,.54,.46)}const aB=P({hammingWindow_:oB});function cB(e){return Gb(e,.5,.5)}const qA=P({hannWindow_:cB});function lB(e,t,n,s=!1,i=0){let o=0;const a=[];for(;o+t<=e.size;)a.push(st(e,o,t)),o+=n;if(s)for(;o`Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`),k(c.rank===2&&c.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${p},4] but had shape ${c.shape}.`),k(u.rank===1&&u.shape[0]===p,()=>`Error in cropAndResize: boxInd must be have size [${p}] but had shape ${c.shape}.`),k(s.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length 
${s.length}.`),k(s[0]>=1&&s[1]>=1,()=>`cropSize must be atleast [1,1], but was ${s}`),k(i==="bilinear"||i==="nearest",()=>`method must be bilinear or nearest, but was ${i}`);const m=I=>I.cropAndResize(a,c,u,s,i,o),y={image:a,boxes:c,boxInd:u},b={method:i,extrapolationValue:o,cropSize:s},w=V.runKernelFunc(m,y,null,Dx,b);return w}const pB=P({cropAndResize_:dB});function mB(e){const t=W(e,"image","flipLeftRight","float32");k(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);const n={image:t},s=V.runKernel(rd,n,{});return s}const fB=P({flipLeftRight_:mB});function gB(e,t,n=0,s=.5){const i=W(e,"image","rotateWithOffset","float32");k(i.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${i.rank}.`);const o={image:i},a={radians:t,fillValue:n,center:s},c=V.runKernel(bd,o,a);return c}const yB=P({rotateWithOffset_:gB});function Ka(e,t,n,s,i,o){s==null&&(s=.5),i==null&&(i=Number.NEGATIVE_INFINITY),o==null&&(o=0);const a=e.shape[0];return n=Math.min(n,a),k(0<=s&&s<=1,()=>`iouThreshold must be in [0, 1], but was '${s}'`),k(e.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`),k(e.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`),k(t.rank===1,()=>"scores must be a 1D tensor"),k(t.shape[0]===a,()=>`scores has incompatible shape with boxes. Expected ${a}, but was ${t.shape[0]}`),k(0<=o&&o<=1,()=>`softNmsSigma must be in [0, 1], but was '${o}'`),{maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o}}function bB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const o=W(e,"boxes","nonMaxSuppression"),a=W(t,"scores","nonMaxSuppression"),c=Ka(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const u={maxOutputSize:n,iouThreshold:s,scoreThreshold:i};return V.runKernelFunc(p=>p.nonMaxSuppression(o,a,n,s,i),{boxes:o,scores:a},null,Kg,u)}const wB=P({nonMaxSuppression_:bB});function LB(e,t,n){const s=SB(e,t,n),i=s<0?-(s+1):s;e.splice(i,0,t)}function SB(e,t,n){return xB(e,t,n||IB)}function IB(e,t){return e>t?1:e>>1);const c=n(t,e[o]);c>0?s=o+1:(i=o,a=!c)}return a?s:-s-1}function wp(e,t,n,s,i){return Yb(e,t,n,s,i,0).selectedIndices}function Lp(e,t,n,s,i,o){return Yb(e,t,n,s,i,0,!1,o,!0)}function Sp(e,t,n,s,i,o){return Yb(e,t,n,s,i,o,!0)}function Yb(e,t,n,s,i,o,a=!1,c=!1,u=!1){const p=[];for(let v=0;vi&&p.push({score:t[v],boxIndex:v,suppressBeginIndex:0});p.sort(KA);const m=o>0?-.5/o:0,y=[],b=[];for(;y.length0;){const v=p.pop(),{score:N,boxIndex:E,suppressBeginIndex:D}=v;if(N=D;--_){const B=TB(e,E,y[_]);if(B>=s){F=!0;break}if(v.score=v.score*AB(s,m,B),v.score<=i)break}v.suppressBeginIndex=y.length,F||(v.score===N?(y.push(E),b.push(v.score)):v.score>i&&LB(p,v,KA))}const w=y.length,I=n-w;c&&I>0&&(y.push(...new Array(I).fill(0)),b.push(...new Array(I).fill(0)));const T={selectedIndices:rs(y,"int32")};return a&&(T.selectedScores=rs(b,"float32")),u&&(T.validOutputs=Ne(w,"int32")),T}function TB(e,t,n){const s=e.subarray(t*4,t*4+4),i=e.subarray(n*4,n*4+4),o=Math.min(s[0],s[2]),a=Math.min(s[1],s[3]),c=Math.max(s[0],s[2]),u=Math.max(s[1],s[3]),p=Math.min(i[0],i[2]),m=Math.min(i[1],i[3]),y=Math.max(i[0],i[2]),b=Math.max(i[1],i[3]),w=(c-o)*(u-a),I=(y-p)*(b-m);if(w<=0||I<=0)return 0;const T=Math.max(o,p),v=Math.max(a,m),N=Math.min(c,y),E=Math.min(u,b),D=Math.max(N-T,0)*Math.max(E-v,0);return D/(w+I-D)}function AB(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function KA(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}async function vB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const 
o=W(e,"boxes","nonMaxSuppressionAsync"),a=W(t,"scores","nonMaxSuppressionAsync"),c=Ka(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const u=await Promise.all([o.data(),a.data()]),p=u[0],m=u[1],y=wp(p,m,n,s,i);return o!==e&&o.dispose(),a!==t&&a.dispose(),y}const NB=vB;function CB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),u=Ka(a,c,n,s,i,o);n=u.maxOutputSize,s=u.iouThreshold,i=u.scoreThreshold,o=u.softNmsSigma;const p={boxes:a,scores:c},m={maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o},y=V.runKernel(ud,p,m);return{selectedIndices:y[0],selectedScores:y[1]}}const RB=P({nonMaxSuppressionWithScore_:CB});async function OB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),u=Ka(a,c,n,s,i,o);n=u.maxOutputSize,s=u.iouThreshold,i=u.scoreThreshold,o=u.softNmsSigma;const p=await Promise.all([a.data(),c.data()]),m=p[0],y=p[1],b=Sp(m,y,n,s,i,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),b}const EB=OB;function DB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),u=Ka(a,c,n,s,i,null),p=u.maxOutputSize,m=u.iouThreshold,y=u.scoreThreshold,b={boxes:a,scores:c},w={maxOutputSize:p,iouThreshold:m,scoreThreshold:y,padToMaxOutputSize:o},I=V.runKernel(hd,b,w);return{selectedIndices:I[0],validOutputs:I[1]}}const kB=P({nonMaxSuppressionPadded_:DB});async function FB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),u=Ka(a,c,n,s,i,null),p=u.maxOutputSize,m=u.iouThreshold,y=u.scoreThreshold,[b,w]=await Promise.all([a.data(),c.data()]),I=Lp(b,w,p,m,y,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),I}const _B=FB;function WB(e,t,n=!1){const s=W(e,"images","resizeBilinear");k(s.rank===3||s.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,u=(b,w)=>(w([i]),b.resizeBilinear(i,a,c,n)),p={images:i},m={alignCorners:n,size:t},y=V.runKernelFunc(u,p,null,sy,m);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const XA=P({resizeBilinear_:WB});function $B(e,t,n=!1){const s=W(e,"images","resizeNearestNeighbor");k(s.rank===3||s.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),k(s.dtype==="float32"||s.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,u={images:i},p={alignCorners:n,size:t},m=(b,w)=>(w([i]),b.resizeNearestNeighbor(i,a,c,n)),y=V.runKernelFunc(m,u,null,ny,p);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const JA=P({resizeNearestNeighbor_:$B});function UB(e,t,n){k(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),k(n%1===0,()=>`bandPart(): numUpper must be an integer, got ${n}.`);const s=W(e,"a","bandPart");k(s.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`);const i=s.shape,[o,a]=s.shape.slice(-2);if(!(t<=o))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${o}).`);if(!(n<=a))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the 
number of columns (${a}).`);t<0&&(t=o),n<0&&(n=a);const c=K(yh(0,o,1,"int32"),[-1,1]),u=yh(0,a,1,"int32"),p=Ce(c,u),m=Bs(Mr(p,Ne(+t,"int32")),tr(p,Ne(-n,"int32"))),y=ct([o,a],s.dtype);return K(as(_i(K(s,[-1,o,a])).map(b=>$n(m,b,y))),i)}const BB=P({bandPart_:UB});function MB(e){let t;if(Array.isArray(e)){t=!1,k(e!=null&&e.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const i=e[0].shape[0];for(let o=1;o`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[o].shape[0]} vs. ${i})`)}else t=!0,e=os(e,e.shape[0],0).map(i=>zr(i,[0]));k(e.length<=e[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`);const n=[],s=e;for(let i=0;i{let o=s[i];if(i>0)for(let a=0;a=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`),e.rank===2)return ZA(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce((u,p)=>u*p),s=_i(K(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),i=[],o=[];s.forEach(u=>{const[p,m]=ZA(u,t);i.push(p),o.push(m)});const a=K(as(i,0),e.shape),c=K(as(o,0),e.shape);return[a,c]}}function ZA(e,t=!1){return V.tidy(()=>{k(e.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`);const n=e.shape[0],s=e.shape[1];let i=Kd(n),o=Wr(e);const a=Gr([[1]],[1,1]);let c=Wr(a);const u=n>=s?s:n;for(let p=0;p{const w=st(o,[p,p],[n-p,1]),I=pp(w),T=st(o,[p,p],[1,1]),v=$n(Ts(T,0),Gr([[-1]]),Gr([[1]])),N=Ce(T,X(v,I)),E=_e(w,N);E.shape[0]===1?c=Wr(a):c=Mt([a,st(E,[1,0],[E.shape[0]-1,E.shape[1]])],0);const D=Pt(_e(at(v,N),I)),F=st(o,[p,0],[n-p,s]),_=X(D,c),B=Pe(c);if(p===0)o=Ce(F,at(_,at(B,F)));else{const q=Ce(F,at(_,at(B,F)));o=Mt([st(o,[0,0],[p,s]),q],0)}const U=Pe(_),Y=st(i,[0,p],[n,i.shape[1]-p]);if(p===0)i=Ce(Y,at(at(Y,c),U));else{const q=Ce(Y,at(at(Y,c),U));i=Mt([st(i,[0,0],[n,p]),q],1)}return[c,o,i]}),qe([m,y,b])}return!t&&n>s&&(i=st(i,[0,0],[n,s]),o=st(o,[0,0],[s,s])),[i,o]})}const GB=P({qr_:zB});(function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(r.Reduction||(r.Reduction={}));function VB(e,t,n=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const s=W(e,"losses","computeWeightedLoss");let i=null;t!=null&&(i=W(t,"weights","computeWeightedLoss"));const o=i==null?s:X(s,i);if(n===r.Reduction.NONE)return o;if(n===r.Reduction.SUM)return Ue(o);if(n===r.Reduction.MEAN){if(i==null)return zt(o);{const a=s.size/i.size,c=_e(Ue(o),Ue(i));return a>1?_e(c,Ne(a)):c}}if(n===r.Reduction.SUM_BY_NONZERO_WEIGHTS){if(i==null)return _e(Ue(o),Ne(s.size));{const a=X(i,si(s.shape)),c=ve(Ue(Pr(a,Ne(0))),"float32");return _e(Ue(o),c)}}throw Error(`Unknown reduction: ${n}`)}const nr=P({computeWeightedLoss_:VB});function HB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","absoluteDifference"),o=W(t,"predictions","absoluteDifference");let a=null;n!=null&&(a=W(n,"weights","absoluteDifference")),dt(i.shape,o.shape,"Error in absoluteDifference: ");const c=sn(Ce(i,o));return nr(c,a,s)}const YB=P({absoluteDifference_:HB});function qB(e,t,n,s,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","cosineDistance"),a=W(t,"predictions","cosineDistance");let c=null;s!=null&&(c=W(s,"weights","cosineDistance")),dt(o.shape,a.shape,"Error in cosineDistance: ");const u=Ne(1),p=Ce(u,Ue(X(o,a),n,!0));return nr(p,c,i)}const jB=P({cosineDistance_:qB});function KB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let i=W(e,"labels","hingeLoss");const o=W(t,"predictions","hingeLoss");let 
a=null;n!=null&&(a=W(n,"weights","hingeLoss")),dt(i.shape,o.shape,"Error in hingeLoss: ");const c=Ne(1);i=Ce(X(Ne(2),i),c);const u=Fi(Ce(c,X(i,o)));return nr(u,a,s)}const XB=P({hingeLoss_:KB});function JB(e,t,n,s=1,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","huberLoss"),a=W(t,"predictions","huberLoss");let c=null;n!=null&&(c=W(n,"weights","huberLoss")),dt(o.shape,a.shape,"Error in huberLoss: ");const u=Ne(s),p=sn(Ce(a,o)),m=Eo(p,u),y=Ce(p,m),b=be(X(Ne(.5),Lt(m)),X(u,y));return nr(b,c,i)}const ZB=P({huberLoss_:JB});function QB(e,t,n,s=1e-7,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","logLoss"),a=W(t,"predictions","logLoss");let c=null;n!=null&&(c=W(n,"weights","logLoss")),dt(o.shape,a.shape,"Error in logLoss: ");const u=Ne(1),p=Ne(s),m=Pt(X(o,is(be(a,p)))),y=X(Ce(u,o),is(be(Ce(u,a),p))),b=Ce(m,y);return nr(b,c,i)}const eM=P({logLoss_:QB});function tM(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","meanSquaredError"),o=W(t,"predictions","meanSquaredError");let a=null;n!=null&&(a=W(n,"weights","meanSquaredError")),dt(i.shape,o.shape,"Error in meanSquaredError: ");const c=Sh(i,o);return nr(c,a,s)}const nM=P({meanSquaredError_:tM});function sM(e,t){const n=W(e,"labels","sigmoidCrossEntropyWithLogits"),s=W(t,"logits","sigmoidCrossEntropyWithLogits");dt(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const i=Fi(s),o=X(s,n),a=Jd(xs(Pt(sn(s))));return be(Ce(i,o),a)}function iM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"multiClassLabels","sigmoidCrossEntropy");const a=W(t,"logits","sigmoidCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","sigmoidCrossEntropy")),dt(o.shape,a.shape,"Error in sigmoidCrossEntropy: "),s>0){const p=Ne(s),m=Ne(1),y=Ne(.5);o=be(X(o,Ce(m,p)),X(y,p))}const u=sM(o,a);return nr(u,c,i)}const rM=P({sigmoidCrossEntropy_:iM});function oM(e,t,n=-1){if(n===-1&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);const s=Di((i,o,a)=>{const c=!0,u=vb(o,[n],c),p=Ce(ve(o,"float32"),u);a([i,p]);const m=Pt(X(p,i)),y=Ue(m,[n]),b=(w,I)=>{const[T,v]=I,N=En(w.shape,[n]);return[X(K(w,N),Ce(ve(T,"float32"),xs(v))),X(K(w,N),Ce(xs(v),ve(T,"float32")))]};return{value:y,gradFunc:b}});return s(e,t)}function aM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"onehotLabels","softmaxCrossEntropy");const a=W(t,"logits","softmaxCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","softmaxCrossEntropy")),dt(o.shape,a.shape,"Error in softmaxCrossEntropy: "),s>0){const p=Ne(s),m=Ne(1),y=Ne(o.shape[1]);o=be(X(o,Ce(m,p)),_e(p,y))}const u=oM(o,a);return nr(u,c,i)}const cM=P({softmaxCrossEntropy_:aM});const lM={fft:wh,ifft:Ya,rfft:Lh,irfft:hp},hM={hammingWindow:aB,hannWindow:qA,frame:jA,stft:uB},Vr={flipLeftRight:fB,resizeNearestNeighbor:JA,resizeBilinear:XA,rotateWithOffset:yB,cropAndResize:pB,nonMaxSuppression:wB,nonMaxSuppressionAsync:NB,nonMaxSuppressionWithScore:RB,nonMaxSuppressionWithScoreAsync:EB,nonMaxSuppressionPadded:kB,nonMaxSuppressionPaddedAsync:_B},QA={bandPart:BB,gramSchmidt:PB,qr:GB},uM={absoluteDifference:YB,computeWeightedLoss:nr,cosineDistance:jB,hingeLoss:XB,huberLoss:ZB,logLoss:eM,meanSquaredError:nM,sigmoidCrossEntropy:rM,softmaxCrossEntropy:cM};class sr extends Ao{minimize(e,t=!1,n){const{value:s,grads:i}=this.computeGradients(e,n);if(n!=null){const o=n.map(a=>({name:a.name,tensor:i[a.name]}));this.applyGradients(o)}else this.applyGradients(i);return qe(i),t?s:(s.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return Ab(e,t)}dispose(){this.iterations_!=null&&qe(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Ne(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(sr,Symbol.hasInstance,{value:e=>e.minimize!=null&&e.computeGradients!=null&&e.applyGradients!=null});class xh extends sr{constructor(e,t,n=null){super();this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],n==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedGrads[s]==null&&(this.accumulatedGrads[s]={originalName:`${n}/accum_grad`,variable:ee(()=>et(i).variable(o))}),this.accumulatedUpdates[s]==null&&(this.accumulatedUpdates[s]={originalName:`${n}/accum_var`,variable:ee(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedGrads[s].variable,u=this.accumulatedUpdates[s].variable;ee(()=>{const p=be(X(c,this.rho),X(Lt(a),1-this.rho)),m=X(_e(Sn(be(u,this.epsilon)),Sn(be(c,this.epsilon))),a),y=be(X(u,this.rho),X(Lt(m),1-this.rho));c.assign(p),u.assign(y);const b=be(X(m,-this.learningRate),i);i.assign(b)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(qe(this.accumulatedGrads.map(e=>e.variable)),qe(this.accumulatedUpdates.map(e=>e.variable)))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=e.length/2,n=!1;this.accumulatedGrads=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedUpdates=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}xh.className="Adadelta",ge(xh);class Th extends sr{constructor(e,t=.1){super();this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n];if(this.accumulatedGrads[s]==null){const c=!1;this.accumulatedGrads[s]={originalName:`${n}/accumulator`,variable:ee(()=>hh(i.shape,this.initialAccumulatorValue).variable(c))}}const o=Array.isArray(e)?e[s].tensor:e[n];if(o==null)return;const a=this.accumulatedGrads[s].variable;ee(()=>{const c=be(a,Lt(o));a.assign(c);const u=be(X(_e(o,Sn(be(c,V.backend.epsilon()))),-this.learningRate),i);i.assign(u)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&qe(this.accumulatedGrads.map(e=>e.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulatedGrads=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}Th.className="Adagrad",ge(Th);class Ah extends sr{constructor(e,t,n,s=null){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],ee(()=>{this.accBeta1=Ne(t).variable(),this.accBeta2=Ne(n).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=Ce(1,this.accBeta2);t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:ee(()=>et(a).variable(c))}),this.accumulatedSecondMoment[o]==null&&(this.accumulatedSecondMoment[o]={originalName:`${i}/v`,variable:ee(()=>et(a).variable(c))});const u=Array.isArray(e)?e[o].tensor:e[i];if(u==null)return;const p=this.accumulatedFirstMoment[o].variable,m=this.accumulatedSecondMoment[o].variable,y=be(X(p,this.beta1),X(u,1-this.beta1)),b=be(X(m,this.beta2),X(Lt(u),1-this.beta2)),w=_e(y,n),I=_e(b,s);p.assign(y),m.assign(b);const T=be(X(_e(w,be(Sn(I),this.epsilon)),-this.learningRate),a);a.assign(T)}),this.accBeta1.assign(X(this.accBeta1,this.beta1)),this.accBeta2.assign(X(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&qe(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedSecondMoment!=null&&qe(this.accumulatedSecondMoment.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await 
this.extractIterations(e),ee(()=>{this.accBeta1.assign(ii(this.beta1,this.iterations_+1)),this.accBeta2.assign(ii(this.beta2,this.iterations_+1))});const t=e.length/2,n=!1;this.accumulatedFirstMoment=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedSecondMoment=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}Ah.className="Adam",ge(Ah);class vh extends sr{constructor(e,t,n,s=null,i=0){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=i,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],ee(()=>{this.iteration=Ne(0).variable(),this.accBeta1=Ne(t).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=_e(-this.learningRate,be(X(this.iteration,this.decay),1));t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:et(a).variable(c)}),this.accumulatedWeightedInfNorm[o]==null&&(this.accumulatedWeightedInfNorm[o]={originalName:`${i}/v`,variable:et(a).variable(c)});const u=Array.isArray(e)?e[o].tensor:e[i];if(u==null)return;const p=this.accumulatedFirstMoment[o].variable,m=this.accumulatedWeightedInfNorm[o].variable,y=be(X(p,this.beta1),X(u,1-this.beta1)),b=X(m,this.beta2),w=sn(u),I=Us(b,w);p.assign(y),m.assign(I);const T=be(X(_e(s,n),_e(y,be(I,this.epsilon))),a);a.assign(T)}),this.iteration.assign(be(this.iteration,1)),this.accBeta1.assign(X(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&qe(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedWeightedInfNorm!=null&&qe(this.accumulatedWeightedInfNorm.map(e=>e.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}vh.className="Adamax",ge(vh);class Xa extends sr{constructor(e){super();this.learningRate=e,this.setLearningRate(e)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=Array.isArray(e)?e[s].tensor:e[n];if(i==null)return;const o=V.registeredVariables[n];ee(()=>{const a=be(X(this.c,i),o);o.assign(a)})}),this.incrementIterations()}setLearningRate(e){this.learningRate=e,this.c!=null&&this.c.dispose(),this.c=Rn(Ne(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(e=await this.extractIterations(e),e.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Xa.className="SGD",ge(Xa);class Nh extends Xa{constructor(e,t,n=!1){super(e);this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Ne(this.momentum)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const 
i=V.registeredVariables[n];if(this.accumulations[s]==null){const c=!1;this.accumulations[s]={originalName:`${n}/momentum`,variable:ee(()=>et(i).variable(c))}}const o=this.accumulations[s].variable,a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;ee(()=>{let c;const u=be(X(this.m,o),a);this.useNesterov?c=be(X(this.c,be(a,X(u,this.m))),i):c=be(X(this.c,u),i),o.assign(u),i.assign(c)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&qe(this.accumulations.map(e=>e.variable))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulations=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}Nh.className="Momentum",ge(Nh);class Ch extends sr{constructor(e,t=.9,n=0,s=null,i=!1){super();if(this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=i,s==null&&(this.epsilon=V.backend.epsilon()),e==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedMeanSquares[s]==null&&(this.accumulatedMeanSquares[s]={originalName:`${n}/rms`,variable:ee(()=>et(i).variable(o))}),this.accumulatedMoments[s]==null&&(this.accumulatedMoments[s]={originalName:`${n}/momentum`,variable:ee(()=>et(i).variable(o))}),this.accumulatedMeanGrads[s]==null&&this.centered&&(this.accumulatedMeanGrads[s]={originalName:`${n}/mg`,variable:ee(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedMeanSquares[s].variable,u=this.accumulatedMoments[s].variable;ee(()=>{const p=be(X(c,this.decay),X(Lt(a),1-this.decay));if(this.centered){const m=this.accumulatedMeanGrads[s].variable,y=be(X(m,this.decay),X(a,1-this.decay)),b=_e(X(a,this.learningRate),Sn(Ce(p,be(Lt(y),this.epsilon)))),w=be(X(u,this.momentum),b);c.assign(p),m.assign(y),u.assign(w);const I=Ce(i,w);i.assign(I)}else{const m=be(X(c,this.decay),X(Lt(a),1-this.decay)),y=be(X(u,this.momentum),_e(X(a,this.learningRate),Sn(be(m,this.epsilon))));c.assign(m),u.assign(y);const b=Ce(i,y);i.assign(b)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&qe(this.accumulatedMeanSquares.map(e=>e.variable)),this.accumulatedMeanGrads!=null&&this.centered&&qe(this.accumulatedMeanGrads.map(e=>e.variable)),this.accumulatedMoments!=null&&qe(this.accumulatedMoments.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const 
t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedMoments=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.centered&&(this.accumulatedMeanGrads=e.slice(t*2,t*3).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}Ch.className="RMSProp",ge(Ch);class $o{static sgd(e){return new Xa(e)}static momentum(e,t,n=!1){return new Nh(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,i=!1){return new Ch(e,t,n,s,i)}static adam(e=.001,t=.9,n=.999,s=null){return new Ah(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new xh(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,i=0){return new vh(e,t,n,s,i)}static adagrad(e,t=.1){return new Th(e,t)}}const Uo={sgd:$o.sgd,momentum:$o.momentum,adadelta:$o.adadelta,adagrad:$o.adagrad,rmsprop:$o.rmsprop,adamax:$o.adamax,adam:$o.adam};const dM=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:e=>e())();function Ip(){return new Promise(e=>dM(()=>e()))}function qb(e,t,n){const s=n*(typeof e=="number"?e:e[0]),i=t*(typeof e=="number"?e:e[1]);return[s,i]}function Rh(e,t,n,s=!0){let i=[];if(s)i=i.concat(t.slice(0)),i.push(e[0]/n),i=i.concat(e.slice(1));else{i=i.concat(e[0]);const o=t.length;for(let a=0;a=t*2+1||a%2===1?o.push(a):i.push(a);s.push(...i),s.push(0),s.push(...o)}return s}function Eh(e,t,n,s=!0){const i=[];s?i.push(e[0]/n):i.push(e[0]*n);for(let o=1;o{const a=[...i];a[n]=o;const c=st(e,s,a);return s[n]+=o,c})}function ow(e,t){const n=new Array(e.rank);for(let i=0;iD.value-E.value);const T=y*s,v=u.subarray(T,T+s),N=p.subarray(T,T+s);for(let E=0;E{const[n]=t;return{x:()=>X(e,qa(ve(n,"float32"),-1))}}};const gM={kernelName:de,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Lt(ve(n,"float32")),i=Sn(Ce(Ne(1),s));return Pt(_e(e,i))}}}};const yM={kernelName:Ae,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Sn(Ce(Lt(ve(n,"float32")),1));return _e(e,s)}}}};const bM={kernelName:xe,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const u=on(n.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,n.shape)},a=()=>{let c=e;const u=on(s.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,s.shape)};return{a:o,b:a}}};const wM={kernelName:Me,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach((s,i)=>{n[i]=()=>e.clone()}),n}};const LM={kernelName:$t,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const SM={kernelName:Kt,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const IM={kernelName:Fn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Sn(Ce(Ne(1),Lt(ve(n,"float32")))))}}};const xM={kernelName:vn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Sn(be(Ne(1),Lt(ve(n,"float32"))));return _e(e,s)}}}};const TM={kernelName:Ai,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=be(Lt(n),Lt(s));let u=X(e,_e(s,c));const p=on(n.shape,i);return p.length>0&&(u=Ue(u,p)),K(u,n.shape)},a=()=>{const c=be(Lt(n),Lt(s));let u=Pt(X(e,_e(n,c)));const p=on(s.shape,i);return p.length>0&&(u=Ue(u,p)),K(u,s.shape)};return{a:o,b:a}}};const 
AM={kernelName:Nn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(Lt(ve(n,"float32")),1))}}};const vM={kernelName:Qs,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Ce(Ne(1),Lt(ve(n,"float32"))))}}};function NM(e,t,n,s,i=[1,1,1],o,a){const c=W(e,"dy","avgPool3dBackprop"),u=W(t,"input","avgPool3dBackprop");let p=c,m=u,y=!1;u.rank===4&&(y=!0,p=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]]),m=K(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]])),k(p.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${p.rank}.`),k(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),k(rn(s,i),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${s} and dilations '${i}'`),a!=null&&k(Ut(o),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const b=v=>{const N=sh(m.shape,n,s,i,o,a);return v.avgPool3dBackprop(p,m,N)},w={dy:p,input:m},I={filterSize:n,strides:s,dilations:i,pad:o,dimRoundingMode:a},T=V.runKernelFunc(b,w,null,Cx,I);return y?K(T,[T.shape[1],T.shape[2],T.shape[3],T.shape[4]]):T}const CM=P({avgPool3dBackprop_:NM});const RM={kernelName:hl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,dilations:a,pad:c,dimRoundingMode:u}=n,p=a==null?[1,1,1]:a;return{x:()=>CM(e,s,i,o,p,c,u)}}};function OM(e,t,n,s,i){const o=W(e,"dy","avgPoolBackprop"),a=W(t,"input","avgPoolBackprop");k(a.rank===o.rank,()=>`Rank of input (${a.rank}) does not match rank of dy (${o.rank})`);let c=a,u=o,p=!1;a.rank===3&&(p=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),u=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(u.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${u.rank}.`),k(c.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${c.rank}.`);const m=I=>{const T=Wn(c.shape,n,s,1,i);return I.avgPoolBackprop(u,c,T)},y={dy:u,input:c},b={filterSize:n,strides:s,pad:i},w=V.runKernelFunc(m,y,null,Sa,b);return p?K(w,[w.shape[1],w.shape[2],w.shape[3]]):w}const EM=P({avgPoolBackprop_:OM});const DM={kernelName:ei,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,pad:a}=n;return{x:()=>EM(e,s,i,o,a)}}};const kM={kernelName:vg,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,i]=t,{transposeA:o,transposeB:a}=n;return!o&&!a?{a:()=>at(e,i,!1,!0),b:()=>at(s,e,!0,!1)}:!o&&a?{a:()=>at(e,i,!1,!1),b:()=>at(e,s,!0,!1)}:o&&!a?{a:()=>at(i,e,!1,!0),b:()=>at(s,e,!1,!1)}:{a:()=>at(i,e,!0,!0),b:()=>at(e,s,!0,!0)}}};const FM={kernelName:Ng,gradFunc:(e,t,n)=>{const{blockShape:s,crops:i}=n;return{x:()=>fh(e,s,i)}}};const _M={kernelName:Cg,gradFunc:(e,t,n)=>{const s=n,i=s.inputShape,o=s.shape,a=Array.from(o);for(let u=i.length-1;u>=0;u--)if(i[u]===o[u])a[u]=1;else if(i[u]!==1)throw new Error(`broadcastTo(): [${i}] cannot be broadcast to [${o}].`);const c=[];for(let u=0;u1&&c.push(u);return{x:()=>Ue(e,c,!0)}}};const WM={kernelName:ul,gradFunc:e=>({x:()=>e.clone()})};const $M={kernelName:dl,gradFunc:e=>({x:()=>et(e)})};const UM={kernelName:pl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:i,clipValueMax:o}=n;return{x:()=>$n(Bs(tr(s,i),Mr(s,o)),e,et(e))}}};const BM={kernelName:td,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map(u=>u.shape),{axis:i}=n,o=ft(i,t[0].shape)[0],a=s.map(u=>u[o]),c=os(e,a,o);return c.map(u=>()=>u)}};const MM={kernelName:Og,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{dilations:o,strides:a,pad:c,dataFormat:u}=n;return k($r(o),()=>`Error 
in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${o}'`),{x:()=>gb(s.shape,e,i,a,c,u),filter:()=>Vb(s,e,i.shape,a,c,u)}}};const PM={kernelName:Eg,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{strides:o,pad:a,dataFormat:c,dimRoundingMode:u}=n;return{dy:()=>er(e,i,o,a,c,1,u),filter:()=>Vb(e,s,i.shape,o,a,c,u)}}};function zM(e,t,n,s,i){let o=e;e.rank===4&&(o=K(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let a=t;a.rank===4&&(a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),k(o.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${o.shape}.`),k(a.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${a.shape}.`),k(n.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`),k(o.shape[4]===n[3],()=>`Error in conv3dDerFilter: depth of input ${o.shape[4]}) must match input depth in filter (${n[3]}.`),k(a.shape[4]===n[4],()=>`Error in conv3dDerFilter: depth of dy (${a.shape[4]}) must match output depth for filter (${n[4]}).`);const c=m=>{const y=1,b=ih(o.shape,n,s,y,i);return m.conv3dDerFilter(o,a,b)},u={x:o,y:a},p={strides:s,pad:i};return V.runKernelFunc(c,u,null,Ox,p)}const GM=P({conv3DBackpropFilter_:zM});const VM={kernelName:Dg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o}=n;k($r(s),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);const[a,c]=t;return{x:()=>lA(a.shape,e,c,i,o),filter:()=>GM(a,e,c.shape,i,o)}}};const HM={kernelName:Ia,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Pt(op(ve(n,"float32"))),e)}}};const YM={kernelName:ml,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(ap(ve(n,"float32")),e)}}};const qM={kernelName:kg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:i,exclusive:o,reverse:a}=n;return{x:()=>{const c=_n([i],s.rank);let u=jd(e,i,o,!a);return c!=null&&(u=Pe(u,c)),u}}}};const jM={kernelName:Fg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o,dimRoundingMode:a}=n,c=s==null?[1,1]:s;k($r(c),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${c}'`);const[u,p]=t;k(u.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${u.rank}.`),k(p.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${p.rank}.`),k(u.shape[3]===p.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${u.shape[3]}) must match the inChannels dimension in filter ${p.shape[2]}.`),k(rn(i,c),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'.`),a!=null&&k(Ut(o),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=Oi(u.shape,p.shape,i,c,o,a,!0);return{x:()=>HA(u.shape,e,p,m),filter:()=>VA(u,e,p.shape,m)}}};const KM={kernelName:nd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,o={x:s,filter:i,dy:e},a={x:s,filter:i,dy:e};return{x:()=>V.runKernel(sd,o,n),filter:()=>V.runKernel(id,a,n)}}};const XM={kernelName:xa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),u=on(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const u=on(s.shape,i);u.length>0&&(c=K(Ue(c,u),s.shape));const p=Lt(s);return Pt(_e(c,ve(p,"float32")))};return{a:o,b:a}}};const JM={kernelName:fl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s=o=>o.eluDer(e,n),i={dy:e,y:n};return{x:()=>V.runKernelFunc(s,i,null,$x)}}};const ZM={kernelName:gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(xs(Pt(Lt(n))),2/Math.sqrt(Math.PI));return{x:()=>X(e,s)}}};const QM={kernelName:yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,n)}}};const eP={kernelName:bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,xs(n))}}};const tP={kernelName:wl,gradFunc:e=>({x:()=>et(e)})};const nP={kernelName:Wg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),u=on(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const u=on(s.shape,i);u.length>0&&(c=K(Ue(c,u),s.shape));const p=Lt(s);return Pt(_e(c,ve(p,"float32")))};return{a:o,b:a}}};const sP={kernelName:Ll,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[i,o,a,c]=t,u=c==null?Ne(1):c,p=on(o.shape,i.shape),m=[];if(o.rank===1){for(let F=0;Fo.rank===1?K(X(X(e,Br(K(w,[1,1,1,o.shape[0]]),m)),u),i.shape):K(X(X(e,w),u),i.shape),v=()=>{let F=X(X(w,Ne(-1)),b);return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)},N=()=>{let F=X(X(I,y),b);return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)},E=()=>{const F=X(y,w);let _=X(e,F);return o.rank===1&&(_=Ue(_,p)),K(_,o.shape)},D=()=>{let F=e;return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)};return{x:T,mean:v,variance:N,scale:E,offset:D}}};const iP={kernelName:$g,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,i]=t,{axis:o}=n,a=ft(o,s.shape)[0],c=()=>{const u=s.shape,p=i.size,m=u.slice(0,a),y=m.length,b=u.slice(o,u.length).slice(1),w=b.length,I=cv(0,y),T=cv(y+1,y+1+w),v=lv([m,[p],b]),N=K(e,v),E=K(i,[p]),D=lv([[y],I,T]),F=Pe(N,D);let _=Mb(F,E,s.shape[a]);const B=eh(D);return _=Pe(_,B),_};return{x:c,indices:()=>i}}};function cv(e,t){const n=[];for(let s=e;s{const[n,s]=t;return{a:()=>et(n),b:()=>et(s)}}};const oP={kernelName:Sl,gradFunc:e=>({x:()=>ve(e,"float32")})};const aP={kernelName:Il,gradFunc:e=>({x:()=>et(e)})};const cP={kernelName:xl,gradFunc:e=>({x:()=>et(e)})};const lP={kernelName:Tl,gradFunc:e=>({x:()=>et(e)})};const hP={kernelName:vl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(n,1))}}};const uP={kernelName:Al,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,ve(n,"float32"))}}};const dP={kernelName:Pg,inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:i}=n;return{logits:()=>{const o=!0,a=xs(s);return Ce(e,X(Ue(e,i,o),a))}}}};function pP(e,t,n,s=5,i=1,o=1,a=.5){const c=m=>m.LRNGrad(n,e,t,s,i,o,a),u={x:e,y:t,dy:n},p={depthRadius:s,bias:i,alpha:o,beta:a};return 
V.runKernelFunc(c,u,null,qx,p)}const mP=P({localResponseNormalizationBackprop_:pP});const fP={kernelName:zg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{depthRadius:o,bias:a,alpha:c,beta:u}=n;return{x:()=>mP(s,i,e,o,a,c,u)}}};function hv(e,t,n,s,i){return t.rank{const o=X(e,ve(ni(n,t),e.dtype));return i==null?o:Pe(o,i)}}}const uv={kernelName:Nl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:i}=s,[o,a]=t,c=ft(i,o.shape),u=_n(c,o.rank),p=hv(e,a,o,c,u);return{x:()=>{let m=p.x();return u!=null&&(m=Pe(m)),m}}}};const gP={kernelName:Gg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(tr(n,s),"float32")),o=()=>X(e,ve(dh(n,s),"float32"));return{a:i,b:o}}};function yP(e,t,n,s,i,o=[1,1,1],a,c){const u=W(e,"dy","maxPool3dBackprop"),p=W(t,"input","maxPool3dBackprop"),m=W(n,"output","maxPool3dBackprop");let y=u,b=p,w=m,I=!1;p.rank===4&&(I=!0,y=K(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]]),b=K(p,[1,p.shape[0],p.shape[1],p.shape[2],p.shape[3]]),w=K(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),k(y.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${y.rank}.`),k(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),k(w.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${w.rank}.`),k(rn(i,o),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${i} and dilations '${o}'`),c!=null&&k(Ut(a),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${a}.`);const T=D=>{const F=sh(b.shape,s,i,o,a,c);return D.maxPool3dBackprop(y,b,w,F)},v={dy:y,input:b,output:w},N={filterSize:s,strides:i,dilations:o,pad:a,dimRoundingMode:c},E=V.runKernelFunc(T,v,null,jx,N);return I?K(E,[E.shape[1],E.shape[2],E.shape[3],E.shape[4]]):E}const bP=P({maxPool3dBackprop_:yP});const wP={kernelName:Vg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,dilations:c,pad:u,dimRoundingMode:p}=n,m=c==null?[1,1,1]:c;return{x:()=>bP(e,s,i,o,a,m,u,p)}}};function LP(e,t,n,s,i,o,a){const c=W(e,"dy","maxPoolBackprop"),u=W(t,"input","maxPoolBackprop"),p=W(n,"output","maxPoolBackprop");k(u.rank===c.rank,()=>`Rank of input (${u.rank}) does not match rank of dy (${c.rank})`),k(c.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${c.rank}.`),k(u.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${u.rank}.`),a!=null&&k(Ut(o),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=w=>{const I=Wn(u.shape,s,i,1,o,a);return w.maxPoolBackprop(c,u,p,I)},y={dy:c,input:u,output:p},b={filterSize:s,strides:i,pad:o,dimRoundingMode:a};return V.runKernelFunc(m,y,null,ad,b)}const SP=P({maxPoolBackprop_:LP});const IP={kernelName:Cl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,pad:c}=n;return{x:()=>SP(e,s,i,o,a,c)}}};const xP={kernelName:Hg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:i}=s,[o,a]=t,c=ft(i,o.shape),u=_n(c,o.rank),p=hv(e,a,o,c,u);return{x:()=>{let m=p.x();return u!=null&&(m=Pe(m)),m}}}};const TP={kernelName:Yg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(Mr(n,s),"float32")),o=()=>X(e,ve(Ts(n,s),"float32"));return{a:i,b:o}}};const AP={kernelName:qg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=on(n.shape,i);return 
c.length>0?K(Ue(e,c),n.shape):e},a=()=>{const c=X(e,Pt(Ba(_e(n,s)))),u=on(s.shape,i);return u.length>0?K(Ue(c,u),s.shape):c};return{a:o,b:a}}};const vP={kernelName:Rl,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=X(e,ve(s,"float32")),u=on(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{const c=X(e,ve(n,"float32")),u=on(s.shape,i);return u.length>0?K(Ue(c,u),s.shape):c};return{a:o,b:a}}};const NP={kernelName:jg,gradFunc:e=>({x:()=>Pt(e)})};const CP={kernelName:Jg,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>ct(n.shape,"float32")}}};const RP={kernelName:Xg,gradFunc:e=>({x:()=>et(e)})};const dv={kernelName:dd,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>st(e,o,s.shape)}}};const OP={kernelName:Zg,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,i]=t,o=n,a=s,c=nt(o.shape,a.shape),u=()=>{const m=ve(a,"float32");let y=X(e,X(m,ii(o,Ce(m,Ne(1)))));const b=on(o.shape,c);return b.length>0&&(y=Ue(y,b)),K(y,o.shape)},p=()=>{const m=Ts(o,0),y=$n(m,is(o),et(o));let b=X(e,X(i,y));const w=on(a.shape,c);return w.length>0&&(b=Ue(b,w)),K(b,a.shape)};return{a:u,b:p}}};const EP={kernelName:Qg,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,i=Ts(n,0);return{x:()=>$n(i,e,X(e,s)),alpha:()=>{let o=$n(i,et(e),X(e,n));const a=on(s.shape,e.shape);return a.length>0&&(o=Ue(o,a)),K(o,s.shape)}}}};const DP={kernelName:Ol,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Pt(Lt(n)))}}};const kP={kernelName:iy,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Mr(n,6),qa(n));return{x:()=>X(e,ve(s,"float32"))}}};const FP={kernelName:ty,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,ve(qa(n),"float32"))}}};const _P={kernelName:El,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>K(e,n.shape)}}};const WP={kernelName:sy,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:u}=n;return c.resizeBilinearBackprop(e,s,u)},o={images:s},a=()=>V.runKernelFunc(i,o,null,Zx,n);return{images:a}}};const $P={kernelName:ny,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:u}=n;return c.resizeNearestNeighborBackprop(e,s,u)},o={images:s},a=()=>V.runKernelFunc(i,o,null,Jx,n);return{images:a}}};const UP={kernelName:ry,gradFunc:(e,t,n)=>{const{dims:s}=n,i=ft(s,e.shape);return{x:()=>As(e,i)}}};const BP={kernelName:Dl,gradFunc:e=>({x:()=>et(e)})};const MP={kernelName:kl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Pt(_e(e,X(ii(n,1.5),2)))}}};const PP={kernelName:oy,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>ve(et(n),"float32"),t:()=>X(e,ve(n,e.dtype)),e:()=>X(e,ve(ph(n),e.dtype))}}};const zP={kernelName:Fl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Ts(n,Ne(0)),i=Ne(xp),o=Ne(Tp),a=X(e,o),c=X(X(e,i),xs(ve(n,"float32")));return $n(s,a,c)}}}};const GP={kernelName:$l,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(n,Ce(Ne(1),n)))}}};const VP={kernelName:Wl,gradFunc:e=>({x:()=>et(e)})};const HP={kernelName:Ta,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(lh(ve(n,"float32")),e)}}};const YP={kernelName:_l,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(qd(ve(n,"float32")),e)}}};const qP={kernelName:pd,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:i,size:o}=n,a=s.shape,[c,u]=$d(s,i,o),p=[];for(let m=0;mki(e,p)}}};const 
jP={kernelName:ly,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:i}=n,o=!0,a=X(e,s);return{logits:()=>Ce(a,X(Ue(a,[i],o),s))}}};const KP={kernelName:Ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ei(n))}}};const pv={kernelName:md,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:i}=n;return{x:()=>ah(e,s,i)}}};const mv={kernelName:cy,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>Mt(e,s)}}};const XP={kernelName:Bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,X(Sn(ve(n,"float32")),2))}}};const JP={kernelName:fd,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(ve(n,"float32"),2))}}};const ZP={kernelName:Aa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=Ne(2),o=()=>X(e,X(i,Ce(n,s))),a=()=>X(e,X(i,Ce(s,n)));return{a:o,b:a}}};const QP={kernelName:Gl,gradFunc:e=>({x:()=>et(e)})};const ez={kernelName:Ml,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const u=on(n.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,n.shape)},a=()=>{let c=e;const u=on(s.shape,i);return u.length>0&&(c=Ue(c,u)),K(Pt(c),s.shape)};return{a:o,b:a}}};const tz={kernelName:ay,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,i=s.shape.slice(),{axis:o}=n,a=ft(o,s.shape);a.forEach(p=>{i[p]=1});const c=K(e,i),u=X(c,si(s.shape,"float32"));return{x:()=>u}}};const nz={kernelName:va,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Lt(lh(n)))}}};const sz={kernelName:Pl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Ce(Ne(1),Lt(n)),e)}}};const iz={kernelName:hy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:i}=n,o=()=>{let a=et(s);if(s.rank===1)for(let c=0;c{const s=n,{perm:i}=s,o=eh(i);return{x:()=>Pe(e,o)}}};const oz={kernelName:uy,gradFunc:(e,t,n)=>{const s=n,{axis:i}=s;return{value:()=>as(e,i)}}};const az={kernelName:dy,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t,s=()=>cz(e,n);return{x:s}}};function cz(e,t){const n=Us(t,et(t)),s=Ma(e,n);let i=tr(t,Ne(0,"int32"));const o=s.rank-i.rank;for(let c=0;c({x:()=>et(e)})};const hz=[fM,gM,yM,bM,wM,LM,SM,IM,xM,TM,AM,vM,RM,DM,kM,FM,_M,WM,$M,UM,BM,PM,MM,VM,HM,YM,qM,jM,KM,XM,JM,ZM,QM,eP,nP,tP,sP,iP,rP,oP,aP,cP,lP,hP,uP,dP,fP,uv,uv,gP,wP,IP,xP,TP,AP,vP,NP,CP,RP,dv,dv,OP,EP,DP,kP,FP,_P,WP,$P,UP,BP,MP,PP,zP,GP,VP,HP,YP,qP,jP,KP,pv,pv,mv,mv,XP,ZP,JP,QP,ez,tz,nz,sz,iz,rz,oz,az,lz];for(const e of hz)sT(e);Q.prototype.abs=function(){return this.throwIfDisposed(),sn(this)};Q.prototype.acos=function(){return this.throwIfDisposed(),nb(this)};Q.prototype.acosh=function(){return this.throwIfDisposed(),sb(this)};Q.prototype.addStrict=function(e){return this.throwIfDisposed(),OA(this,e)};Q.prototype.add=function(e){return this.throwIfDisposed(),be(this,e)};Q.prototype.all=function(e,t){return this.throwIfDisposed(),Pd(this,e,t)};Q.prototype.any=function(e,t){return this.throwIfDisposed(),th(this,e,t)};Q.prototype.argMax=function(e){return this.throwIfDisposed(),nh(this,e)};Q.prototype.argMin=function(e){return this.throwIfDisposed(),rb(this,e)};Q.prototype.asScalar=function(){return this.throwIfDisposed(),k(this.size===1,()=>"The array must have only 1 element."),K(this,[])};Q.prototype.asType=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.as2D=function(e,t){return this.throwIfDisposed(),K(this,[e,t])};Q.prototype.as3D=function(e,t,n){return this.throwIfDisposed(),K(this,[e,t,n])};Q.prototype.as4D=function(e,t,n,s){return 
this.throwIfDisposed(),K(this,[e,t,n,s])};Q.prototype.as5D=function(e,t,n,s,i){return this.throwIfDisposed(),K(this,[e,t,n,s,i])};Q.prototype.asin=function(){return this.throwIfDisposed(),ob(this)};Q.prototype.asinh=function(){return this.throwIfDisposed(),ab(this)};Q.prototype.atan=function(){return this.throwIfDisposed(),cb(this)};Q.prototype.atan2=function(e){return this.throwIfDisposed(),lb(this,e)};Q.prototype.atanh=function(){return this.throwIfDisposed(),hb(this)};Q.prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),oh(this,e,t,n,s)};Q.prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),ah(this,e,t)};Q.prototype.batchNorm=function(e,t,n,s,i){return this.throwIfDisposed(),No(this,e,t,n,s,i)};Q.prototype.broadcastTo=function(e){return this.throwIfDisposed(),ch(this,e)};Q.prototype.cast=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.ceil=function(){return this.throwIfDisposed(),fb(this)};Q.prototype.clipByValue=function(e,t){return this.throwIfDisposed(),jn(this,e,t)};Q.prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof Q&&(e=[e]),Mt([this,...e],t)};Q.prototype.conv1d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Hd(this,e,t,n,s,i,o)};Q.prototype.conv2dTranspose=function(e,t,n,s,i){return this.throwIfDisposed(),Yd(this,e,t,n,s,i)};Q.prototype.conv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),er(this,e,t,n,s,i,o)};Q.prototype.cos=function(){return this.throwIfDisposed(),lh(this)};Q.prototype.cosh=function(){return this.throwIfDisposed(),qd(this)};Q.prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),jd(this,e,t,n)};Q.prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),bb(this,e,t)};Q.prototype.depthwiseConv2D=function(e,t,n,s,i,o){return nn("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};Q.prototype.depthwiseConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};Q.prototype.dilation2d=function(e,t,n,s,i){return this.throwIfDisposed(),wb(this,e,t,n,s,i)};Q.prototype.divNoNan=function(e){return this.throwIfDisposed(),Lb(this,e)};Q.prototype.divStrict=function(e){return this.throwIfDisposed(),EA(this,e)};Q.prototype.div=function(e){return this.throwIfDisposed(),_e(this,e)};Q.prototype.dot=function(e){return this.throwIfDisposed(),hA(this,e)};Q.prototype.elu=function(){return this.throwIfDisposed(),Oo(this)};Q.prototype.equalStrict=function(e){return this.throwIfDisposed(),TA(this,e)};Q.prototype.equal=function(e){return this.throwIfDisposed(),ni(this,e)};Q.prototype.erf=function(){return this.throwIfDisposed(),Sb(this)};Q.prototype.exp=function(){return this.throwIfDisposed(),xs(this)};Q.prototype.expandDims=function(e){return this.throwIfDisposed(),Kn(this,e)};Q.prototype.expm1=function(){return this.throwIfDisposed(),Ib(this)};Q.prototype.fft=function(){return this.throwIfDisposed(),wh(this)};Q.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.floor=function(){return this.throwIfDisposed(),Ba(this)};Q.prototype.floorDiv=function(e){return this.throwIfDisposed(),Md(this,e)};Q.prototype.gather=function(e,t){return this.throwIfDisposed(),Ma(this,e,t)};Q.prototype.greaterEqualStrict=function(e){return this.throwIfDisposed(),AA(this,e)};Q.prototype.greaterEqual=function(e){return this.throwIfDisposed(),tr(this,e)};Q.prototype.greaterStrict=function(e){return this.throwIfDisposed(),vA(this,e)};Q.prototype.greater=function(e){return 
this.throwIfDisposed(),Ts(this,e)};Q.prototype.ifft=function(){return this.throwIfDisposed(),Ya(this)};Q.prototype.irfft=function(){return this.throwIfDisposed(),hp(this)};Q.prototype.isFinite=function(){return this.throwIfDisposed(),dA(this)};Q.prototype.isInf=function(){return this.throwIfDisposed(),pA(this)};Q.prototype.isNaN=function(){return this.throwIfDisposed(),mA(this)};Q.prototype.leakyRelu=function(e){return this.throwIfDisposed(),Xd(this,e)};Q.prototype.lessEqualStrict=function(e){return this.throwIfDisposed(),NA(this,e)};Q.prototype.lessEqual=function(e){return this.throwIfDisposed(),Mr(this,e)};Q.prototype.lessStrict=function(e){return this.throwIfDisposed(),CA(this,e)};Q.prototype.less=function(e){return this.throwIfDisposed(),dh(this,e)};Q.prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),Tb(this,e,t,n,s)};Q.prototype.logSigmoid=function(){return this.throwIfDisposed(),gA(this)};Q.prototype.logSoftmax=function(e){return this.throwIfDisposed(),Qd(this,e)};Q.prototype.logSumExp=function(e,t){return this.throwIfDisposed(),vb(this,e,t)};Q.prototype.log=function(){return this.throwIfDisposed(),is(this)};Q.prototype.log1p=function(){return this.throwIfDisposed(),Jd(this)};Q.prototype.logicalAnd=function(e){return this.throwIfDisposed(),Bs(this,e)};Q.prototype.logicalNot=function(){return this.throwIfDisposed(),ph(this)};Q.prototype.logicalOr=function(e){return this.throwIfDisposed(),ep(this,e)};Q.prototype.logicalXor=function(e){return this.throwIfDisposed(),yA(this,e)};Q.prototype.matMul=function(e,t,n){return this.throwIfDisposed(),at(this,e,t,n)};Q.prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),mh(this,e,t,n,s)};Q.prototype.max=function(e,t){return this.throwIfDisposed(),Xn(this,e,t)};Q.prototype.maximumStrict=function(e){return this.throwIfDisposed(),DA(this,e)};Q.prototype.maximum=function(e){return this.throwIfDisposed(),Us(this,e)};Q.prototype.mean=function(e,t){return this.throwIfDisposed(),zt(this,e,t)};Q.prototype.min=function(e,t){return this.throwIfDisposed(),Ga(this,e,t)};Q.prototype.minimumStrict=function(e){return this.throwIfDisposed(),kA(this,e)};Q.prototype.minimum=function(e){return this.throwIfDisposed(),Eo(this,e)};Q.prototype.modStrict=function(e){return this.throwIfDisposed(),FA(this,e)};Q.prototype.mod=function(e){return this.throwIfDisposed(),tp(this,e)};Q.prototype.mulStrict=function(e){return this.throwIfDisposed(),_A(this,e)};Q.prototype.mul=function(e){return this.throwIfDisposed(),X(this,e)};Q.prototype.neg=function(){return this.throwIfDisposed(),Pt(this)};Q.prototype.norm=function(e,t,n){return this.throwIfDisposed(),pp(this,e,t,n)};Q.prototype.notEqualStrict=function(e){return this.throwIfDisposed(),RA(this,e)};Q.prototype.notEqual=function(e){return this.throwIfDisposed(),Pr(this,e)};Q.prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),To(this,e,t,n)};Q.prototype.onesLike=function(){return this.throwIfDisposed(),Dn(this)};Q.prototype.pad=function(e,t){return this.throwIfDisposed(),ki(this,e,t)};Q.prototype.pool=function(e,t,n,s,i){return this.throwIfDisposed(),LA(this,e,t,n,s,i)};Q.prototype.powStrict=function(e){return this.throwIfDisposed(),WA(this,e)};Q.prototype.pow=function(e){return this.throwIfDisposed(),ii(this,e)};Q.prototype.prelu=function(e){return this.throwIfDisposed(),gh(this,e)};Q.prototype.prod=function(e,t){return this.throwIfDisposed(),sp(this,e,t)};Q.prototype.reciprocal=function(){return this.throwIfDisposed(),Eb(this)};Q.prototype.relu=function(){return 
this.throwIfDisposed(),Fi(this)};Q.prototype.relu6=function(){return this.throwIfDisposed(),Db(this)};Q.prototype.reshapeAs=function(e){return this.throwIfDisposed(),K(this,e.shape)};Q.prototype.reshape=function(e){return this.throwIfDisposed(),K(this,e)};Q.prototype.resizeBilinear=function(e,t){return this.throwIfDisposed(),XA(this,e,t)};Q.prototype.resizeNearestNeighbor=function(e,t){return this.throwIfDisposed(),JA(this,e,t)};Q.prototype.reverse=function(e){return this.throwIfDisposed(),As(this,e)};Q.prototype.rfft=function(){return this.throwIfDisposed(),Lh(this)};Q.prototype.round=function(){return this.throwIfDisposed(),kb(this)};Q.prototype.rsqrt=function(){return this.throwIfDisposed(),ip(this)};Q.prototype.selu=function(){return this.throwIfDisposed(),rp(this)};Q.prototype.separableConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Fb(this,e,t,n,s,i,o)};Q.prototype.sigmoid=function(){return this.throwIfDisposed(),Ei(this)};Q.prototype.sign=function(){return this.throwIfDisposed(),_b(this)};Q.prototype.sin=function(){return this.throwIfDisposed(),op(this)};Q.prototype.sinh=function(){return this.throwIfDisposed(),ap(this)};Q.prototype.slice=function(e,t){return this.throwIfDisposed(),st(this,e,t)};Q.prototype.softmax=function(e){return this.throwIfDisposed(),Wo(this,e)};Q.prototype.softplus=function(){return this.throwIfDisposed(),za(this)};Q.prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),fh(this,e,t)};Q.prototype.split=function(e,t){return this.throwIfDisposed(),os(this,e,t)};Q.prototype.sqrt=function(){return this.throwIfDisposed(),Sn(this)};Q.prototype.square=function(){return this.throwIfDisposed(),Lt(this)};Q.prototype.squaredDifference=function(e){return this.throwIfDisposed(),Sh(this,e)};Q.prototype.squaredDifferenceStrict=function(e){return this.throwIfDisposed(),$A(this,e)};Q.prototype.squeeze=function(e){return this.throwIfDisposed(),zr(this,e)};Q.prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof Q?[this,e]:[this,...e];return as(n,t)};Q.prototype.step=function(e){return this.throwIfDisposed(),qa(this,e)};Q.prototype.stridedSlice=function(e,t,n,s,i,o,a,c){return this.throwIfDisposed(),$b(this,e,t,n,s,i,o,a,c)};Q.prototype.subStrict=function(e){return this.throwIfDisposed(),UA(this,e)};Q.prototype.sub=function(e){return this.throwIfDisposed(),Ce(this,e)};Q.prototype.sum=function(e,t){return this.throwIfDisposed(),Ue(this,e,t)};Q.prototype.tan=function(){return this.throwIfDisposed(),Ub(this)};Q.prototype.tanh=function(){return this.throwIfDisposed(),Ua(this)};Q.prototype.tile=function(e){return this.throwIfDisposed(),Br(this,e)};Q.prototype.toBool=function(){return this.throwIfDisposed(),ve(this,"bool")};Q.prototype.toFloat=function(){return this.throwIfDisposed(),ve(this,"float32")};Q.prototype.toInt=function(){return this.throwIfDisposed(),ve(this,"int32")};Q.prototype.topk=function(e,t){return this.throwIfDisposed(),Bb(this,e,t)};Q.prototype.transpose=function(e){return this.throwIfDisposed(),Pe(this,e)};Q.prototype.unique=function(e){return this.throwIfDisposed(),up(this,e)};Q.prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Mb(this,e,t)};Q.prototype.unstack=function(e){return this.throwIfDisposed(),_i(this,e)};Q.prototype.where=function(e,t){return this.throwIfDisposed(),$n(e,this,t)};Q.prototype.zerosLike=function(){return this.throwIfDisposed(),et(this)};let Ap;function an(){return Ap==null&&(Ap=QT().epsilon()),Ap}function $Q(e){Ap=e}function ri(){return"channelsLast"}class rr extends 
Error{constructor(e){super(e);Object.setPrototypeOf(this,rr.prototype)}}class oi extends Error{constructor(e){super(e);Object.setPrototypeOf(this,oi.prototype)}}class j extends Error{constructor(e){super(e);Object.setPrototypeOf(this,j.prototype)}}class Ge extends Error{constructor(e){super(e);Object.setPrototypeOf(this,Ge.prototype)}}class fv extends Error{constructor(e){super(e);Object.setPrototypeOf(this,fv.prototype)}}class uz extends Error{constructor(e){super(e);Object.setPrototypeOf(this,uz.prototype)}}function Bo(e,t){if(Array.isArray(e)){let n=[];for(let s=0;sn.toUpperCase())}let Ms={};function cw(e){if(e==null)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function lw(e){if(e==null||typeof e!="object")return;if(Array.isArray(e))e.forEach(t=>lw(t));else{const t=Object.keys(e);for(const n of t){const s=e[n];s!=null&&typeof s=="object"&&(!Array.isArray(s)&&s.type==="ndarray"&&typeof s.value=="number"?e[n]=s.value:lw(s))}}}function Dh(e,t={},n={},s="object",i=!1){if(typeof e=="string"){const o=e;let a;if(o in n)a=n[o];else if(o in Ms)a=Ms[o];else if(a=t[o],a==null)throw new j(`Unknown ${s}: ${e}. This may be due to one of the following reasons: + the f you passed encloses all operations that lead from x to y.`)}function AW(e){const t=W(e,"x","neg"),n={x:t};return V.runKernelFunc(s=>s.neg(t),n,null,jg)}const Pt=P({neg_:AW});function vW(e){const t=W(e,"x","softplus"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.softplus(t);return i([t]),o},n,null,Ul)}const Va=P({softplus_:vW});function NW(e){const t=W(e,"x","logSigmoid"),n=Di(s=>{const i=Pt(Va(Pt(s))),o=a=>{const c=X(a,Ei(Pt(s)));return c};return{value:i,gradFunc:o}});return n(t)}const gA=P({logSigmoid_:NW});function CW(e,t=null,n=!1){const s=W(e,"x","max"),i=(c,u)=>{const p=ft(t,s.shape);let m=p;const y=_n(m,s.rank);let b=s;y!=null&&(b=Pe(s,y),m=Is(m.length,b.rank));const w=c.max(b,m);y!=null&&b.dispose();let I=w;if(n){const T=En(I.shape,ft(t,s.shape));I=K(I,T),w.dispose()}return u([s,I]),I},o={x:s},a={reductionIndices:t,keepDims:n};return V.runKernelFunc(i,o,null,Nl,a)}const Xn=P({max_:CW});function RW(e,t){let n=W(e,"a","sub"),s=W(t,"b","sub");[n,s]=Bt(n,s);const i=(a,c)=>{const u=a.subtract(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ml)}const Ce=P({sub_:RW});function OW(e,t=null,n=!1){let s=W(e,"x","sum");s.dtype==="bool"&&(s=ve(s,"int32"));const i=(c,u)=>{u([s]);const p=ft(t,s.shape),m=_n(p,s.rank);let y=p,b=s;m!=null&&(b=Pe(s,m),y=Is(y.length,s.rank));let w=c.sum(b,y);if(n){const I=En(w.shape,p);w=K(w,I)}return w},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,ay,a)}const Ue=P({sum_:OW});function EW(e,t=-1){const n=W(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and axis was ${t}`);const s=(a,c)=>{const u=!0,p=Xn(e,t,!0),m=Ce(e,p),y=Ce(ve(m,"float32"),is(Ue(xs(m),t,u)));return c([y]),y},i={logits:n},o={axis:t};return V.runKernelFunc(s,i,null,Pg,o)}const Qd=P({logSoftmax_:EW});function DW(e,t=null,n=!1){const s=W(e,"x","logSumExp"),i=ft(t,s.shape),o=Xn(s,i,!0),a=Ce(s,o),c=xs(a),u=Ue(c,i),p=is(u),m=be(K(o,p.shape),p);if(n){const y=En(m.shape,i);return K(m,y)}return m}const vb=P({logSumExp_:DW});function kW(e,t){const n=W(e,"a","logicalAnd","bool"),s=W(t,"b","logicalAnd","bool");nt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalAnd(n,s),i,null,Hx)}const Bs=P({logicalAnd_:kW});function FW(e){const t=W(e,"x","logicalNot","bool"),n={x:t};return V.runKernelFunc(s=>s.logicalNot(t),n,null,od)}const ph=P({logicalNot_:FW});function _W(e,t){const n=W(e,"a","logicalOr","bool"),s=W(t,"b","logicalOr","bool");nt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalOr(n,s),i,null,Yx)}const ep=P({logicalOr_:_W});function WW(e,t){const n=W(e,"a","logicalXor","bool"),s=W(t,"b","logicalXor","bool");return nt(n.shape,s.shape),Bs(ep(e,t),ph(Bs(e,t)))}const yA=P({logicalXor_:WW});function $W(e,t,n,s,i){const o=W(e,"x","maxPool"),a=1;let c=o,u=!1;o.rank===3&&(u=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(c.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${c.rank}.`),k(on(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const p=(w,I)=>{const T=Wn(c.shape,t,n,1,s,i);let v;return T.filterWidth===1&&T.filterHeight===1&&ot(T.inShape,T.outShape)?v=c.clone():v=w.maxPool(c,T),I([c,v]),v},m={x:c},y={filterSize:t,strides:n,pad:s,dimRoundingMode:i},b=V.runKernelFunc(p,m,null,Cl,y);return u?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const mh=P({maxPool_:$W});function UW(e,t=[1,1,1],n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:sn("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","maxPool3d");let u=c,p=!1;c.rank===4&&(p=!0,u=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),k(u.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${u.rank}.`),k(o==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),k(on(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(I,T)=>{a==null&&(a=[1,1,1]);const v=sh(u.shape,t,n,a,s,i,o),N=I.maxPool3d(u,v);return T([u,N]),N},y={x:u},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a},w=V.runKernelFunc(m,y,null,Vg,b);return p?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Nb=P({maxPool3d_:UW});function BW(e,t,n,s,i=!1){const o=W(e,"x","maxPoolWithArgmax"),a={x:o},c={filterSize:t,strides:n,pad:s,includeBatchInIndex:i},u=V.runKernel(cd,a,c);return{result:u[0],indexes:u[1]}}const bA=P({maxPoolWithArgmax_:BW});function ct(e,t="float32"){if(t==="complex64"){const s=ct(e,"float32"),i=ct(e,"float32");return Ci(s,i)}const n=Ea(we(e),t);return V.makeTensor(n,e,t)}function si(e,t="float32"){if(t==="complex64"){const s=si(e,"float32"),i=ct(e,"float32");return Ci(s,i)}const n=Ay(we(e),t);return V.makeTensor(n,e,t)}function MW(e,t=null,n=!1){const s=W(e,"x","mean"),i=ft(t,s.shape),o=On(s.shape,i),a=o[1],c=we(a),u=Di(p=>{const m=Ne(c),y=m.dtype===p.dtype?p:ve(p,m.dtype),b=_e(y,m),w=Ue(b,t,n),I=T=>{const v=p.shape.slice();i.forEach(D=>{v[D]=1});const N=K(T,v),E=_e(X(N,si(p.shape,"float32")),c);return E};return{value:w,gradFunc:I}});return u(s)}const zt=P({mean_:MW});function PW(e,t=null,n=!1){const s=W(e,"x","min"),i=(c,u)=>{const p=ft(t,s.shape);let m=p;const y=_n(m,s.rank);let b=s;y!=null&&(b=Pe(s,y),m=Is(m.length,s.rank));const w=c.min(b,m);y!=null&&b.dispose();let I=w;if(n){const T=En(I.shape,p);I=K(w,T),w.dispose()}return u([s,I]),I},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Hg,a)}const Ha=P({min_:PW});function zW(e,t){let n=W(e,"a","minimum"),s=W(t,"b","minimum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const u=a.minimum(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,Yg)}const ko=P({minimum_:zW});function GW(e,t){let n=W(e,"a","mod"),s=W(t,"b","mod");[n,s]=Bt(n,s);const i=(a,c)=>{const u=a.mod(n,s);return c([n,s]),u},o={a:n,b:s};return V.runKernelFunc(i,o,null,qg)}const tp=P({mod_:GW});function VW(e){const t=W(e,"x","square"),n={},s=[t],i=[];return V.runKernelFunc((o,a)=>(a([t]),o.square(t)),{x:t},null,"Square",n,s,i)}const Lt=P({square_:VW});function HW(e,t=null,n=!1){e=W(e,"x","moments");const s=ft(t,e.shape),i=zt(e,s,n);let o=i.shape;n||(o=En(i.shape,s));const a=Lt(Ce(ve(e,"float32"),K(i,o))),c=zt(a,s,n);return{mean:i,variance:c}}const np=P({moments_:HW});function YW(e,t,n,s){const i=W(t,"data","multiRNNCell"),o=Zl(n,"c","multiRNNCell"),a=Zl(s,"h","multiRNNCell");let c=i;const u=[];for(let y=0;y2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);n=n||Math.random();const c=a===1?K(i,[1,-1]):i,u=V.runKernelFunc(p=>p.multinomial(c,s,t,n),{logits2D:c});return a===1?K(u,[u.size]):u}const wA=P({multinomial_:jW});function KW(e,t){let n=W(e,"a","notEqual"),s=W(t,"b","notEqual");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=a=>a.notEqual(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,ld)}const Pr=P({notEqual_:KW});function XW(e){const t=W(e,"input","real"),n=i=>i.real(t),s={input:t};return V.runKernelFunc(n,s,null,ey)}const Fo=P({real_:XW});function JW(e){const t=W(e,"x","onesLike"),n=(i,o)=>{if(t.dtype==="complex64"){const a=Dn(Fo(t)),c=et(Ga(t));return Ci(a,c)}return i.onesLike(t)},s={x:t};return V.runKernelFunc(n,s,null,Xg)}const Dn=P({onesLike_:JW});function ZW(e,t){const 
n=W(e,"v1","outerProduct"),s=W(t,"v2","outerProduct");k(n.rank===1&&s.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${n.rank} and ${s.rank}.`);const i=K(n,[-1,1]),o=K(s,[1,-1]);return at(i,o)}const QW=P({outerProduct_:ZW});function e$(e,t,n=0){const s=W(e,"x","pad");if(s.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const i=(c,u)=>(u([s]),c.pad(s,t,n)),o={paddings:t,constantValue:n},a={x:s};return V.runKernelFunc(i,a,null,dd,o)}const ki=P({pad_:e$});function t$(e,t,n=0){return k(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),ki(e,[t],n)}const n$=P({pad1d_:t$});function s$(e,t,n=0){return k(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const i$=P({pad2d_:s$});function r$(e,t,n=0){return k(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const o$=P({pad3d_:r$});function a$(e,t,n=0){return k(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),ki(e,t,n)}const c$=P({pad4d_:a$});function l$(e,t,n){const s=W(e,"x","spaceToBatchND");k(s.rank>=1+t.length,()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`),k(n.length===t.length,()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`),k(s.shape.reduce((c,u,p)=>p>0&&p<=t.length?c&&(u+n[p-1][0]+n[p-1][1])%t[p-1]===0:c,!0),()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`);const i=c=>c.spaceToBatchND(s,t,n),o={x:s},a={blockShape:t,paddings:n};return V.runKernelFunc(i,o,null,md,a)}const fh=P({spaceToBatchND_:l$});function h$(e,t,n,s,i,o){i==null&&(i=[1,1]),o==null&&(o=1),s===0&&(s="valid");const a=W(e,"x","maxPool");let c=a,u=!1;a.rank===3&&(u=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]])),k(on(o,i),()=>`Error in pool: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${i}'`);const p=Wn(c.shape,t,o,i,s),m=[p.dilationHeight,p.dilationWidth];let y;s==="same"?y=d$([p.filterHeight,p.filterWidth],m):y=[[0,0],[0,0]];const b=m[0]===1&&m[1]===1,[w,I]=u$([p.inHeight,p.inWidth],m,y),T=b?s:"valid",v=b?c:fh(c,m,w),N=n==="avg"?()=>oh(v,t,o,T):()=>mh(v,t,o,T),E=N(),D=b?E:ah(E,m,I);return u?K(D,[D.shape[1],D.shape[2],D.shape[3]]):D}function u$(e,t,n){const s=n.map(m=>m[0]),i=n.map(m=>m[1]),o=e.concat(s,i),a=t.map((m,y)=>(m-o[y]%m)%m),c=i.map((m,y)=>m+a[y]),u=t.map((m,y)=>[s[y],c[y]]),p=t.map((m,y)=>[0,a[y]]);return[u,p]}function d$(e,t){const n=e.map((a,c)=>a+(a-1)*(t[c]-1)),s=n.map(a=>a-1),i=s.map(a=>Math.floor(a/2)),o=s.map((a,c)=>a-i[c]);return s.map((a,c)=>[i[c],o[c]])}const LA=P({pool_:h$});function p$(e,t){let n=W(e,"base","pow"),s=W(t,"exp","pow");[n,s]=Bt(n,s);const i={a:n,b:s},o=(a,c)=>{const u=a.pow(n,s);return c([n,s,u]),u};return V.runKernelFunc(o,i,null,Zg)}const ii=P({pow_:p$});function m$(e,t){const n=W(e,"x","prelu"),s=W(t,"alpha","prelu"),i=(a,c)=>{const u=a.prelu(n,s);return c([n,s]),u},o={x:n,alpha:s};return V.runKernelFunc(i,o,null,Qg)}const gh=P({prelu_:m$});function f$(e,t=null,n=!1){let s=W(e,"x","prod");const i=c=>{s.dtype==="bool"&&(s=ve(s,"int32"));const u=ft(t,s.shape),p=_n(u,s.rank);let m=u,y=s;p!=null&&(y=Pe(s,p),m=Is(m.length,s.rank));let b=c.prod(y,m);if(n){const w=En(b.shape,u);b=K(b,w)}return b},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Kx,a)}const sp=P({prod_:f$});function g$(e,t,n){const s=we(e);let i=null;if(n==null||n==="float32")i=new Float32Array(s);else if(n==="int32")i=new Int32Array(s);else if(n==="bool")i=new Uint8Array(s);else throw new Error(`Unknown data type ${n}`);for(let o=0;o>>0,b-=u,b*=u,u=b>>>0,b-=u,u+=b*4294967296}return(u>>>0)*23283064365386963e-26};return p}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.alea=a})(Ya,e,!1)}),w$=_o(function(e){(function(t,n,s){function i(c){var u=this,p="";u.x=0,u.y=0,u.z=0,u.w=0,u.next=function(){var y=u.x^u.x<<11;return u.x=u.y,u.y=u.z,u.z=u.w,u.w^=u.w>>>19^y^y>>>8},c===(c|0)?u.x=c:p+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor128=a})(Ya,e,!1)}),L$=_o(function(e){(function(t,n,s){function i(c){var u=this,p="";u.next=function(){var y=u.x^u.x>>>2;return u.x=u.y,u.y=u.z,u.z=u.w,u.w=u.v,(u.d=u.d+362437|0)+(u.v=u.v^u.v<<4^(y^y<<1))|0},u.x=0,u.y=0,u.z=0,u.w=0,u.v=0,c===(c|0)?u.x=c:p+=c;for(var m=0;m>>4),u.next()}function o(c,u){return u.x=c.x,u.y=c.y,u.z=c.z,u.w=c.w,u.v=c.v,u.d=c.d,u}function a(c,u){var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorwow=a})(Ya,e,!1)}),S$=_o(function(e){(function(t,n,s){function i(c){var u=this;u.next=function(){var m=u.x,y=u.i,b,w,I;return b=m[y],b^=b>>>7,w=b^b<<24,b=m[y+1&7],w^=b^b>>>10,b=m[y+3&7],w^=b^b>>>3,b=m[y+4&7],w^=b^b<<7,b=m[y+7&7],b=b^b<<13,w^=b^b<<9,m[y]=w,u.i=y+1&7,w};function p(m,y){var b,w,I=[];if(y===(y|0))w=I[0]=y;else for(y=""+y,b=0;b0;--b)m.next()}p(u,c)}function o(c,u){return u.x=c.x.slice(),u.i=c.i,u}function 
a(c,u){c==null&&(c=+new Date);var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(m.x&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorshift7=a})(Ya,e,!1)}),I$=_o(function(e){(function(t,n,s){function i(c){var u=this;u.next=function(){var m=u.w,y=u.X,b=u.i,w,I;return u.w=m=m+1640531527|0,I=y[b+34&127],w=y[b=b+1&127],I^=I<<13,w^=w<<17,I^=I>>>15,w^=w>>>12,I=y[b]=I^w,u.i=b,I+(m^m>>>16)|0};function p(m,y){var b,w,I,T,v,N=[],E=128;for(y===(y|0)?(w=y,y=null):(y=y+"\0",w=0,E=Math.max(E,y.length)),I=0,T=-32;T>>15,w^=w<<4,w^=w>>>13,T>=0&&(v=v+1640531527|0,b=N[T&127]^=w+v,I=b==0?I+1:0);for(I>=128&&(N[(y&&y.length||0)&127]=-1),I=127,T=4*128;T>0;--T)w=N[I+34&127],b=N[I=I+1&127],w^=w<<13,b^=b<<17,w^=w>>>15,b^=b>>>12,N[I]=w^b;m.w=v,m.X=N,m.i=I}p(u,c)}function o(c,u){return u.i=c.i,u.w=c.w,u.X=c.X.slice(),u}function a(c,u){c==null&&(c=+new Date);var p=new i(c),m=u&&u.state,y=function(){return(p.next()>>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(m.X&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor4096=a})(Ya,e,!1)}),x$=_o(function(e){(function(t,n,s){function i(c){var u=this,p="";u.next=function(){var y=u.b,b=u.c,w=u.d,I=u.a;return y=y<<25^y>>>7^b,b=b-w|0,w=w<<24^w>>>8^I,I=I-y|0,u.b=y=y<<20^y>>>12^b,u.c=b=b-w|0,u.d=w<<16^b>>>16^I,u.a=I-y|0},u.a=0,u.b=0,u.c=2654435769|0,u.d=1367130551,c===Math.floor(c)?(u.a=c/4294967296|0,u.b=c|0):p+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=p.next()>>>11,w=(p.next()>>>0)/4294967296,I=(b+w)/(1<<21);while(I===0);return I},y.int32=p.next,y.quick=y,m&&(typeof m=="object"&&o(m,p),y.state=function(){return o(p,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.tychei=a})(Ya,e,!1)}),Wo=_o(function(e){(function(t,n){var s=this,i=256,o=6,a=52,c="random",u=n.pow(i,o),p=n.pow(2,a),m=p*2,y=i-1,b;function w(F,_,B){var U=[];_=_==!0?{entropy:!0}:_||{};var Y=N(v(_.entropy?[F,D(t)]:F==null?E():F,3),U),q=new I(U),J=function(){for(var oe=q.g(o),ce=u,ue=0;oe=m;)oe/=2,ce/=2,ue>>>=1;return(oe+ue)/ce};return J.int32=function(){return q.g(4)|0},J.quick=function(){return q.g(4)/4294967296},J.double=J,N(D(q.S),t),(_.pass||B||function(oe,ce,ue,he){return he&&(he.S&&T(he,q),oe.state=function(){return T(q,{})}),ue?(n[c]=oe,ce):oe})(J,Y,"global"in _?_.global:this==n,_.state)}n["seed"+c]=w;function I(F){var _,B=F.length,U=this,Y=0,q=U.i=U.j=0,J=U.S=[];for(B||(F=[B++]);Y=1||o===0);const a=Math.sqrt(-2*Math.log(o)/o);e=this.mean+this.stdDev*s*a,t=this.mean+this.stdDev*i*a,(!this.truncated||this.isValidTruncated(e))&&(n=!0)}return(!this.truncated||this.isValidTruncated(t))&&(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return this.dtype==null||this.dtype==="float32"?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class A${constructor(e,t,n,s){this.alpha=e,this.beta=1/t,this.dtype=n;const i=s||Math.random();this.randu=qa(i.toString()),this.randn=new Cb(0,1,n,!1,this.randu()),e<1?this.d=e+2/3:this.d=e-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let e,t,n,s,i,o;for(;;){do 
s=this.randn.nextValue(),o=1+this.c*s;while(o<=0);if(o*=o*o,e=s*s,t=1-.331*e*e,n=.5*e+this.d*(1-o+Math.log(o)),i=this.randu(),ithis.dtype==null||this.dtype==="float32",this.min=e,this.range=t-e,this.dtype=n,s==null&&(s=Math.random()),typeof s=="number"&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=qa(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function _Q(e){const t=e.length,n=R$(e),s=C$(e),i=t/6*(Math.pow(n,2)+.25*Math.pow(s-3,2)),o=5.991;if(i>o)throw new Error(`Invalid p-value for JB: ${i}`)}function WQ(e,t,n,s){s==null&&(s=Ud());const i=Rb(e);Qy(i,t,s),Qy(N$(e,i),n,s)}function Rb(e){let t=0;for(let n=0;n{const a=e===t,c=e1;if(a||c||u)return ct([0],s);const p=Math.abs(Math.ceil((t-e)/n)),m=Ea(p,s);t{const o=s.reciprocal(t);return i([t]),o},n,null,Ol)}const Eb=P({reciprocal_:F$});function _$(e){const t=W(e,"x","relu"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu(t)),s={x:t};return V.runKernelFunc(n,s,null,ty)}const Fi=P({relu_:_$});function W$(e){const t=W(e,"x","relu6"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu6(t)),s={x:t};return V.runKernelFunc(n,s,null,iy)}const Db=P({relu6_:W$});function $$(e,t){const n=W(e,"x","reverse"),s=a=>{const c=ft(t,n.shape);if(n.rank===0)return Wr(n);const u=a.reverse(n,c);return K(u,n.shape)},i={x:n},o={dims:t};return V.runKernelFunc(s,i,null,ry,o)}const As=P({reverse_:$$});function U$(e){const t=W(e,"x","reverse");return k(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),As(t,0)}const B$=P({reverse1d_:U$});function M$(e,t){const n=W(e,"x","reverse");return k(n.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${n.rank}.`),As(n,t)}const P$=P({reverse2d_:M$});function z$(e,t){const n=W(e,"x","reverse");return k(n.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${n.rank}.`),As(n,t)}const G$=P({reverse3d_:z$});function V$(e,t){const n=W(e,"x","reverse");return k(n.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${n.rank}.`),As(n,t)}const H$=P({reverse4d_:V$});function Y$(e){const t=W(e,"x","round"),n={x:t};return V.runKernelFunc(s=>s.round(t),n,null,Dl)}const kb=P({round_:Y$});function q$(e){const t=W(e,"x","rsqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.rsqrt(t);return i([t]),o},n,null,kl)}const ip=P({rsqrt_:q$});function j$(e){const t=W(e,"x","selu"),n=(i,o)=>{const a=i.selu(t);return o([t]),a},s={x:t};return V.runKernelFunc(n,s,null,Fl)}const rp=P({selu_:j$});function K$(e,t,n,s,i,o=[1,1],a="NHWC"){const c=W(e,"x","separableConv2d"),u=W(t,"depthwiseFilter","separableConv2d"),p=W(n,"pointwiseFilter","separableConv2d");let m=c,y=!1;if(c.rank===3&&(y=!0,m=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),a==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");k(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),k(u.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${u.rank}.`),k(p.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${u.rank}.`),k(p.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${p.shape[0]}.`),k(p.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${p.shape[1]}.`);const 
b=u.shape[2],w=u.shape[3];k(p.shape[2]===b*w,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*w}, but got ${p.shape[2]}.`);const I=Oo(m,u,s,i,a,o),T=1,v=er(I,p,T,"valid",a);return y?K(v,[v.shape[1],v.shape[2],v.shape[3]]):v}const Fb=P({separableConv2d_:K$});async function X$(e,t){const n=W(e,"x","setdiff1d"),s=W(t,"y","setdiff1d");k(n.dtype===s.dtype,()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`),k(n.rank===1,()=>`x should be 1D tensor, but got x (${n.shape}).`),k(s.rank===1,()=>`y should be 1D tensor, but got y (${s.shape}).`);const i=await n.data(),o=await s.data(),a=new Set(o);let c=0;for(let m=0;ms.sign(t),n,null,Wl)}const _b=P({sign_:J$});function Z$(e){const t=W(e,"x","sin"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sin(t);return i([t]),o},n,null,va)}const op=P({sin_:Z$});function Q$(e){const t=W(e,"x","sinh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sinh(t);return i([t]),o},n,null,_l)}const ap=P({sinh_:Q$});function eU(e,t,n){const s=W(e,"x","slice1d");return k(s.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`),st(s,[t],[n])}const cp=P({slice1d_:eU});function tU(e,t,n){const s=W(e,"x","slice2d");return k(s.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const Wb=P({slice2d_:tU});function nU(e,t,n){const s=W(e,"x","slice3d");return k(s.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const lp=P({slice3d_:nU});function sU(e,t,n){const s=W(e,"x","slice4d");return k(s.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`),st(s,t,n)}const bh=P({slice4d_:sU});function iU(e,t=-1){const n=W(e,"logits","softmax","float32");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},i={dim:t};return V.runKernelFunc((o,a)=>{const c=o.softmax(n,t);return a([c]),c},s,null,ly,i)}const Uo=P({softmax_:iU});function rU(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=e.as2D(i,s),a=n.fft(o);return a.reshape(e.shape)},t,null,_g)}const wh=P({fft_:rU});function oU(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=K(e,[i,s]),a=n.ifft(o);return K(a,e.shape)},t,null,Bg)}const ja=P({ifft_:oU});function aU(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const i=K(e,[n,t]);s=ja(i)}else{const i=[n,2*(t-1)],o=K(Fo(e),[n,t]),a=K(Ga(e),[n,t]),c=As(st(o,[0,1],[n,t-2]),1),u=X(As(st(a,[0,1],[n,t-2]),1),Ne(-1)),p=Mt([o,c],1),m=Mt([a,u],1),y=K(Ci(p,m),[i[0],i[1]]);s=ja(y)}if(s=Fo(s),e.rank===3&&e.shape[0]!==0){const i=s,o=e.shape[0];s=K(s,[o,s.shape[0]/o,s.shape[1]]),i.dispose()}return s}const hp=P({irfft_:aU});function IA(e,t,n=0){let s=[];if(typeof t=="number")k(e.shape[n]%t===0,()=>"Number of splits must evenly divide the axis."),s=new Array(t).fill(e.shape[n]/t);else{const i=t.reduce((a,c)=>(c===-1&&(a+=1),a),0);k(i<=1,()=>"There should be only one negative value in split array.");const o=t.indexOf(-1);if(o!==-1){const a=t.reduce((c,u)=>u>0?c+u:c);t[o]=e.shape[n]-a}k(e.shape[n]===t.reduce((a,c)=>a+c),()=>"The sum of sizes must match the size of the axis dimension."),s=t}return s}function cU(e,t,n=0){const s=W(e,"x","split"),i=(c,u)=>{const p=ft(n,s.shape)[0],m=IA(s,t,p);return c.split(s,m,p)},o={x:s},a={numOrSizeSplits:t,axis:n};return V.runKernelFunc(i,o,null,cy,a)}const os=P({split_:cU});function lU(e,t){k(e.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${e.dtype}`);let n=e.shape[e.shape.length-1];const s=e.size/n;let i;if(t!=null&&t0),T=e.shape.map(v=>v);T[e.shape.length-1]=t,i=st(e,I,T),n=t}else if(t!=null&&t>n){const I=e.shape.map(T=>T);I[e.shape.length-1]=t-n,i=Mt([e,ct(I)],e.shape.length-1),n=t}else i=e;const o=et(i),a=K(Ci(i,o),[s,n]),c=wh(a),u=Math.floor(n/2)+1,p=Fo(c),m=Ga(c),y=os(p,[u,n-u],p.shape.length-1),b=os(m,[u,n-u],m.shape.length-1),w=i.shape.slice();return w[i.shape.length-1]=u,K(Ci(y[0],b[0]),w)}const Lh=P({rfft_:lU});function hU(e){const t=W(e,"x","sqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sqrt(t);return i([t]),o},n,null,Bl)}const Sn=P({sqrt_:hU});function uU(e,t){let n=W(e,"a","squaredDifference"),s=W(t,"b","squaredDifference");[n,s]=Bt(n,s),nt(n.shape,s.shape);const i=(c,u)=>{const p=c.squaredDifference(n,s);return u([n,s]),p},o={a:n,b:s},a={};return V.runKernelFunc(i,o,null,Na,a)}const Sh=P({squaredDifference_:uU});function dU(e,t){const n=W(e,"x","squeeze");return K(n,Rr(n.shape,t).newShape)}const zr=P({squeeze_:dU});function pU(e,t=0){const n=Zl(e,"tensors","stack");if(k(n.length>=1,()=>"Pass at least one tensor to tf.stack"),n.length===1)return Kn(n[0],t);const s=n[0].rank,i=n[0].shape,o=n[0].dtype;k(t<=s,()=>"Axis must be <= rank of the tensor"),n.forEach(c=>{dt(i,c.shape,"All tensors passed to stack must have matching shapes"),k(o===c.dtype,()=>"All tensors passed to stack must have matching dtypes")});const a=n.map(c=>Kn(c,t));return Mt(a,t)}const as=P({stack_:pU});function mU(e,t=0){const n=W(e,"x","step"),s={x:n},i={alpha:t};return 
V.runKernelFunc(o=>o.step(n,t),s,null,Gl,i)}const Ka=P({step_:mU});function fU(e,t,n,s,i=0,o=0,a=0,c=0,u=0){let p=W(e,"x","stridedSlice");const m=w=>{s==null&&(s=new Array(t.length));const I=_d(a);if(I.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(a!==0&&c!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(a!==0&&u!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const T=p.rank-t.length,v=_d(c),N=p.shape.slice();v.forEach(J=>{t[J]=0,n[J]=1,N.splice(J,0,1)}),p=K(p,N);const{begin:E,end:D,strides:F}=GT(p.shape,I,T,t,n,s,i,o,a);t=E,n=D,s=F;const _=_d(u);_.forEach(J=>{n[J]=t[J]+1,s[J]=1});const B=Wd(t,n,s),U=B.filter((J,oe)=>_.indexOf(oe)===-1),Y=s.every(J=>J===1);if(Y)return K(st(p,t,B),U);const q=w.stridedSlice(p,t,n,s);return K(q,U)},y={x:p},b={begin:t,end:n,strides:s,beginMask:i,endMask:o,ellipsisMask:a,newAxisMask:c,shrinkAxisMask:u};return V.runKernelFunc(m,y,null,tT,b)}const $b=P({stridedSlice_:fU});function gU(e){const t=W(e,"x","tan"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.tan(t);return i([t]),o},n,null,Ca)}const Ub=P({tan_:gU});function Gr(e,t,n){if(wo(e),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const s=Ni(e,n);if(s.length!==2&&s.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Fr(e,t,s,n)}function Xa(e,t,n){if(wo(e),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");const s=Ni(e,n);if(s.length!==4&&s.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return Fr(e,t,s,n)}function yU(e,t,n){if(wo(e),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");const s=Ni(e,n);if(s.length!==5&&s.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return Fr(e,t,s,n)}function bU(e,t,n){if(wo(e),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");const s=Ni(e,n);if(s.length!==6&&s.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||s,Fr(e,t,s,n)}function wU(e,t=1,n=!0){const s=W(e,"x","topk");if(s.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");const i=s.shape[s.shape.length-1];if(t>i)throw new Error(`'k' passed to topk() must be <= the last dimension (${i}) but got ${t}`);const o={x:s},a={k:t,sorted:n},[c,u]=V.runKernelFunc(p=>p.topk(s,t,n),o,null,nT,a);return{values:c,indices:u}}const Bb=P({topk_:wU});function LU(e,t=0,n=1,s,i){if(s!=null&&s==="bool")throw new Error("Unsupported data type $ { dtype }");const o=new Cb(t,n,s,!0,i),a=Qe(e,s);for(let c=0;c0,()=>"The input tensor must be at least 1D");const s={x:n},i={axis:t},[o,a]=V.runKernel(gd,s,i);return{values:o,indices:a}}const up=P({unique_:SU});function IU(e,t,n){const s=W(e,"x","unsortedSegmentSum"),i=W(t,"segmentIds","unsortedSegmentSum","int32");k(Ut(n),()=>"numSegments must be of dtype 
int");const o={x:s,segmentIds:i},a={numSegments:n},c=(u,p)=>{const m=u.unsortedSegmentSum(s,i,n);return p([i]),m};return V.runKernelFunc(c,o,null,dy,a)}const Mb=P({unsortedSegmentSum_:IU});function xU(e,t=0){const n=W(e,"x","unstack");k(t>=-n.shape.length&&t`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`),t<0&&(t+=n.shape.length);const s={value:n},i={axis:t},o=a=>a.unstack(n,t);return V.runKernelFunc(o,s,null,uy,i)}const _i=P({unstack_:xU});function xA(e,t=!0,n,s){return V.makeVariable(e,t,n,s)}function dp(e,t){const n=[];for(let o=0;o0,()=>"mask cannot be scalar"),dt(c.slice(o,o+a),i.shape,"mask's shape must match the first K dimensions of tensor's shape,");let u=1;for(let T=o;T"Shape mismatch in v and x");const u=Ne(1),p=Ce(u,c);let m=X(Ce(a,o),p);if(i){k(s!=null,()=>"When using zeroDebias: true, step is required.");const y=W(s,"step","movingAverage");m=_e(m,Ce(u,ii(c,y)))}return be(o,m)}const VU=P({movingAverage_:GU});function HU(e,t,n){const s=W(e,"indices","scatterND","int32"),i=W(t,"updates","scatterND");jy(i,s,n);const o=u=>u.scatterND(s,i,n),a={indices:s,updates:i},c={shape:n};return V.runKernelFunc(o,a,null,Qx,c)}const MA=P({scatterND_:HU});function YU(e,t,n,s){if(e.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const i=e.rank>0?e.shape[0]:1,o=e.rank>1?e.shape[1]:1;if(n.length!==o)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${o}.`);const a=t.size;if(!(t.rank===0||t.rank===1&&a===i))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${i}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function qU(e,t,n,s=0){const i=W(e,"sparseIndices","sparseToDense","int32"),o=W(t,"sparseValues","sparseToDense"),a=W(s,"defaultValue","sparseToDense",o.dtype);YU(i,o,n,a);const c={sparseIndices:i,sparseValues:o,defaultValue:a},u={outputShape:n};return V.runKernelFunc(p=>p.sparseToDense(i,o,n,a),c,null,eT,u)}const zb=P({sparseToDense_:qU});function jU(e,t){const n=W(t,"indices","gatherND","int32"),s=W(e,"x","gatherND"),i=a=>a.gatherND(s,n),o={params:s,indices:n};return V.runKernelFunc(i,o,null,Mx)}const PA=P({gatherND_:jU});function KU(e,t){if(t==null)return e.shape.slice();if(ot(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s`x has to be a floating point tensor since it's going to be scaled, but got a ${i.dtype} tensor instead.`),k(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return e instanceof Q?i.clone():i;const o=KU(i,n),a=1-t,c=_e(Pa(be($o(o,0,1,"float32",s),a)),a);return X(i,c)}const zA=P({dropout_:XU});function GA(e){return Math.floor(Math.pow(2,Math.ceil(Math.log(e)/Math.log(2))))}function Gb(e,t,n){const s=1-e%2,i=new Float32Array(e);for(let o=0;o1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${s.rank}`),k(s.rank-1===i.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${s.rank} and targets rank ${i.rank}`),dt(s.shape.slice(0,s.shape.length-1),i.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");const o=s.shape[s.shape.length-1];k(n>0&&n<=o,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${o}), but got ${n}`);const a=await s.data(),c=await 
i.data(),[u,p]=[a.length/o,o],m=wn("bool",u);for(let y=0;yv.value-T.value),m[y]=0;for(let T=0;T`Error in conv2dDerFilter: input must be rank 4, but got shape ${c.shape}.`),k(u.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${u.shape}.`),k(n.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`);const p=o==="NHWC"?c.shape[3]:c.shape[1],m=o==="NHWC"?u.shape[3]:u.shape[1];k(p===n[2],()=>`Error in conv2dDerFilter: depth of input ${p}) must match input depth in filter (${n[2]}.`),k(m===n[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${n[3]}).`),a!=null&&k(Ut(i),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const y=I=>{const T=1,v=rh(o),N=Oi(c.shape,n,s,T,i,a,!1,v);return I.conv2dDerFilter(c,u,N)},b={x:c,dy:u},w={strides:s,pad:i,dataFormat:o,dimRoundingMode:a};return V.runKernelFunc(y,b,null,Rx,w)}const Vb=P({conv2DBackpropFilter_:QU});function mp(e,t,n){if(n==null||n==="linear")return e;if(n==="relu")return X(e,Ka(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function fp(e,t){let n=t;const s=an(e.shape,t.shape);return s.length>0&&(n=Ue(n,s)),K(n,e.shape)}function gp(e,t,n){if(t==="linear")return e;if(t==="relu")return Fi(e);if(t==="elu")return Do(e);if(t==="relu6")return Db(e);if(t==="prelu")return gh(e,n);throw new Error(`Unknown fused activation ${t}.`)}const yp=(e,t)=>{const n=e>0;return!n||t==="linear"};function eB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:u="linear",preluActivationWeights:p}){if(u=u||"linear",yp(V.state.gradientDepth,u)===!1){let _=er(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),gp(_,u,p)}const m=W(e,"x","conv2d"),y=W(t,"filter","conv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${y.rank}.`),a!=null&&k(Ut(s),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),k(b.shape[3]===y.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${y.shape[2]}.`),k(on(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),k(i==="NHWC",()=>`Error in conv2d: got dataFormat of ${i} but only NHWC is currently supported.`);const I=Oi(b.shape,y.shape,n,o,s,a);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),nt(I.outShape,T.shape));let v;p!=null&&(v=W(p,"prelu weights","fused conv2d"));const N=(_,B)=>{const[U,Y,q,J]=B,oe=mp(_,q,u);k($r(o),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${o}'`);const ce=gb(Y.shape,oe,U,n,s),ue=Vb(Y,oe,U.shape,n,s),he=[ce,ue];if(J!=null){const pe=fp(J,oe);he.push(pe)}return he},E=_=>{const B=_.fusedConv2d({input:b,filter:y,convInfo:I,bias:T,activation:u,preluActivationWeights:v});return B},D={x:b,filter:y,bias:T,preluActivationWeights:v},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:u};if(c==null){const _=Di((B,U,Y)=>{let q=V.runKernelFunc(E,D,null,fy,F);return Y([U,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Di((B,U,Y,q)=>{let J=V.runKernelFunc(E,D,null,fy,F);return q([U,B,J,Y]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const Hb=P({fusedConv2d_:eB});function tB(e,t,n,s){let i=e;e.rank===3&&(i=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let o=t;o.rank===3&&(o=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=u=>u.depthwiseConv2DDerFilter(i,o,s),c={x:i,dy:o};return V.runKernelFunc(a,c,null,Fx)}const VA=P({depthwiseConv2dNativeBackpropFilter_:tB});function nB(e,t,n,s){let i=t,o=!1;t.rank===3&&(o=!0,i=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=p=>p.depthwiseConv2DDerInput(i,n,s),c={dy:i},u=V.runKernelFunc(a,c,null,_x);return o?K(u,[u.shape[1],u.shape[2],u.shape[3]]):u}const HA=P({depthwiseConv2dNativeBackpropInput_:nB});function sB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:u="linear",preluActivationWeights:p}){if(yp(V.state.gradientDepth,u)===!1){let _=Oo(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),gp(_,u,p)}const m=W(e,"x","depthwiseConv2d"),y=W(t,"filter","depthwiseConv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${y.rank}.`),k(b.shape[3]===y.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${y.shape[2]}.`),o==null&&(o=[1,1]),k(on(n,o),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),a!=null&&k(Ut(s),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${a} but got pad ${s}.`);const I=Oi(b.shape,y.shape,n,o,s,a,!0);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),nt(I.outShape,T.shape));let v;p!=null&&(v=W(p,"prelu weights","fused depthwiseConv2d"));const N=(_,B)=>{k($r(o),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`);const[U,Y,q,J]=B,oe=mp(_,q,u),ce=HA(Y.shape,oe,U,I),ue=VA(Y,oe,U.shape,I);if(J!=null){const he=fp(T,oe);return[ce,ue,he]}return[ce,ue]},E=_=>{const B=_.fusedDepthwiseConv2D({input:b,filter:y,convInfo:I,bias:T,activation:u,preluActivationWeights:v});return B},D={x:b,filter:y,bias:T,preluActivationWeights:v},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:u};if(c==null){const _=Di((B,U,Y)=>{let q=V.runKernelFunc(E,D,null,gy,F);return Y([U,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Di((B,U,Y,q)=>{let J=V.runKernelFunc(E,D,null,gy,F);return q([U,B,J,Y]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const YA=P({fusedDepthwiseConv2d_:sB});function iB({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:i,activation:o="linear",preluActivationWeights:a}){if(yp(V.state.gradientDepth,o)===!1){let J=at(e,t,n,s);return i!=null&&(J=be(J,i)),gp(J,o,a)}let c=W(e,"a","fused matMul"),u=W(t,"b","fused matMul");[c,u]=Bt(c,u);const p=n?c.shape[c.rank-2]:c.shape[c.rank-1],m=s?u.shape[u.rank-1]:u.shape[u.rank-2],y=n?c.shape[c.rank-1]:c.shape[c.rank-2],b=s?u.shape[u.rank-2]:u.shape[u.rank-1],w=c.shape.slice(0,-2),I=u.shape.slice(0,-2),T=we(w),v=we(I);k(c.rank>=2&&u.rank>=2&&c.rank===u.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${c.rank} and ${u.rank}.`),k(ot(w,I),()=>`Error in fused matMul: outer dimensions (${w}) and (${I}) of Tensors with shapes ${c.shape} and ${u.shape} must match.`),k(p===m,()=>`Error in fused matMul: inner shapes (${p}) and (${m}) of Tensors with shapes ${c.shape} and ${u.shape} and transposeA=${n} and transposeB=${s} must match.`);const N=c.shape.slice(0,-2).concat([y,b]),E=n?K(c,[T,p,y]):K(c,[T,y,p]),D=s?K(u,[v,b,m]):K(u,[v,m,b]);let F;i!=null&&(F=W(i,"bias","fused matMul"),[F]=Bt(F,c),nt(N,F.shape));let _;a!=null&&(_=W(a,"prelu weights","fused matMul"));const B=(J,oe)=>{const[ce,ue,he,pe]=oe,le=mp(K(J,he.shape),he,o);let ye,me;if(!n&&!s?(ye=at(le,ue,!1,!0),me=at(ce,le,!0,!1)):!n&&s?(ye=at(le,ue,!1,!1),me=at(le,ce,!0,!1)):n&&!s?(ye=at(ue,le,!1,!0),me=at(ce,le,!1,!1)):(ye=at(ue,le,!0,!0),me=at(le,ce,!0,!0)),i!=null){const Ie=fp(pe,le);return[ye,me,Ie]}else return[ye,me]},U=J=>{const oe=J.fusedBatchMatMul({a:E,b:D,transposeA:n,transposeB:s,bias:F,activation:o,preluActivationWeights:_});return oe},Y={a:E,b:D,bias:F,preluActivationWeights:_},q={transposeA:n,transposeB:s,activation:o};if(i==null){const J=Di((oe,ce,ue)=>{const he=V.runKernelFunc(U,Y,null,my,q);return ue([oe,ce,he]),{value:K(he,N),gradFunc:B}});return J(E,D)}else{const J=Di((oe,ce,ue,he)=>{const pe=V.runKernelFunc(U,Y,null,my,q);return he([oe,ce,pe,ue]),{value:K(pe,N),gradFunc:B}});return J(E,D,F)}}const bp=P({fusedMatMul_:iB});var rB=Object.freeze({__proto__:null,conv2d:Hb,depthwiseConv2d:YA,matMul:bp});function oB(e){return Gb(e,.54,.46)}const aB=P({hammingWindow_:oB});function cB(e){return Gb(e,.5,.5)}const qA=P({hannWindow_:cB});function lB(e,t,n,s=!1,i=0){let o=0;const a=[];for(;o+t<=e.size;)a.push(st(e,o,t)),o+=n;if(s)for(;o`Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`),k(c.rank===2&&c.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${p},4] but had shape ${c.shape}.`),k(u.rank===1&&u.shape[0]===p,()=>`Error in cropAndResize: boxInd must be have size [${p}] but had shape ${c.shape}.`),k(s.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length 
${s.length}.`),k(s[0]>=1&&s[1]>=1,()=>`cropSize must be atleast [1,1], but was ${s}`),k(i==="bilinear"||i==="nearest",()=>`method must be bilinear or nearest, but was ${i}`);const m=I=>I.cropAndResize(a,c,u,s,i,o),y={image:a,boxes:c,boxInd:u},b={method:i,extrapolationValue:o,cropSize:s},w=V.runKernelFunc(m,y,null,Dx,b);return w}const pB=P({cropAndResize_:dB});function mB(e){const t=W(e,"image","flipLeftRight","float32");k(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);const n={image:t},s=V.runKernel(rd,n,{});return s}const fB=P({flipLeftRight_:mB});function gB(e,t,n=0,s=.5){const i=W(e,"image","rotateWithOffset","float32");k(i.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${i.rank}.`);const o={image:i},a={radians:t,fillValue:n,center:s},c=V.runKernel(bd,o,a);return c}const yB=P({rotateWithOffset_:gB});function Ja(e,t,n,s,i,o){s==null&&(s=.5),i==null&&(i=Number.NEGATIVE_INFINITY),o==null&&(o=0);const a=e.shape[0];return n=Math.min(n,a),k(0<=s&&s<=1,()=>`iouThreshold must be in [0, 1], but was '${s}'`),k(e.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`),k(e.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`),k(t.rank===1,()=>"scores must be a 1D tensor"),k(t.shape[0]===a,()=>`scores has incompatible shape with boxes. Expected ${a}, but was ${t.shape[0]}`),k(0<=o&&o<=1,()=>`softNmsSigma must be in [0, 1], but was '${o}'`),{maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o}}function bB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const o=W(e,"boxes","nonMaxSuppression"),a=W(t,"scores","nonMaxSuppression"),c=Ja(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const u={maxOutputSize:n,iouThreshold:s,scoreThreshold:i};return V.runKernelFunc(p=>p.nonMaxSuppression(o,a,n,s,i),{boxes:o,scores:a},null,Kg,u)}const wB=P({nonMaxSuppression_:bB});function LB(e,t,n){const s=SB(e,t,n),i=s<0?-(s+1):s;e.splice(i,0,t)}function SB(e,t,n){return xB(e,t,n||IB)}function IB(e,t){return e>t?1:e>>1);const c=n(t,e[o]);c>0?s=o+1:(i=o,a=!c)}return a?s:-s-1}function wp(e,t,n,s,i){return Yb(e,t,n,s,i,0).selectedIndices}function Lp(e,t,n,s,i,o){return Yb(e,t,n,s,i,0,!1,o,!0)}function Sp(e,t,n,s,i,o){return Yb(e,t,n,s,i,o,!0)}function Yb(e,t,n,s,i,o,a=!1,c=!1,u=!1){const p=[];for(let v=0;vi&&p.push({score:t[v],boxIndex:v,suppressBeginIndex:0});p.sort(KA);const m=o>0?-.5/o:0,y=[],b=[];for(;y.length0;){const v=p.pop(),{score:N,boxIndex:E,suppressBeginIndex:D}=v;if(N=D;--_){const B=TB(e,E,y[_]);if(B>=s){F=!0;break}if(v.score=v.score*AB(s,m,B),v.score<=i)break}v.suppressBeginIndex=y.length,F||(v.score===N?(y.push(E),b.push(v.score)):v.score>i&&LB(p,v,KA))}const w=y.length,I=n-w;c&&I>0&&(y.push(...new Array(I).fill(0)),b.push(...new Array(I).fill(0)));const T={selectedIndices:rs(y,"int32")};return a&&(T.selectedScores=rs(b,"float32")),u&&(T.validOutputs=Ne(w,"int32")),T}function TB(e,t,n){const s=e.subarray(t*4,t*4+4),i=e.subarray(n*4,n*4+4),o=Math.min(s[0],s[2]),a=Math.min(s[1],s[3]),c=Math.max(s[0],s[2]),u=Math.max(s[1],s[3]),p=Math.min(i[0],i[2]),m=Math.min(i[1],i[3]),y=Math.max(i[0],i[2]),b=Math.max(i[1],i[3]),w=(c-o)*(u-a),I=(y-p)*(b-m);if(w<=0||I<=0)return 0;const T=Math.max(o,p),v=Math.max(a,m),N=Math.min(c,y),E=Math.min(u,b),D=Math.max(N-T,0)*Math.max(E-v,0);return D/(w+I-D)}function AB(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function KA(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}async function vB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const 
o=W(e,"boxes","nonMaxSuppressionAsync"),a=W(t,"scores","nonMaxSuppressionAsync"),c=Ja(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const u=await Promise.all([o.data(),a.data()]),p=u[0],m=u[1],y=wp(p,m,n,s,i);return o!==e&&o.dispose(),a!==t&&a.dispose(),y}const NB=vB;function CB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),u=Ja(a,c,n,s,i,o);n=u.maxOutputSize,s=u.iouThreshold,i=u.scoreThreshold,o=u.softNmsSigma;const p={boxes:a,scores:c},m={maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o},y=V.runKernel(ud,p,m);return{selectedIndices:y[0],selectedScores:y[1]}}const RB=P({nonMaxSuppressionWithScore_:CB});async function OB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),u=Ja(a,c,n,s,i,o);n=u.maxOutputSize,s=u.iouThreshold,i=u.scoreThreshold,o=u.softNmsSigma;const p=await Promise.all([a.data(),c.data()]),m=p[0],y=p[1],b=Sp(m,y,n,s,i,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),b}const EB=OB;function DB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),u=Ja(a,c,n,s,i,null),p=u.maxOutputSize,m=u.iouThreshold,y=u.scoreThreshold,b={boxes:a,scores:c},w={maxOutputSize:p,iouThreshold:m,scoreThreshold:y,padToMaxOutputSize:o},I=V.runKernel(hd,b,w);return{selectedIndices:I[0],validOutputs:I[1]}}const kB=P({nonMaxSuppressionPadded_:DB});async function FB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),u=Ja(a,c,n,s,i,null),p=u.maxOutputSize,m=u.iouThreshold,y=u.scoreThreshold,[b,w]=await Promise.all([a.data(),c.data()]),I=Lp(b,w,p,m,y,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),I}const _B=FB;function WB(e,t,n=!1){const s=W(e,"images","resizeBilinear");k(s.rank===3||s.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,u=(b,w)=>(w([i]),b.resizeBilinear(i,a,c,n)),p={images:i},m={alignCorners:n,size:t},y=V.runKernelFunc(u,p,null,sy,m);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const XA=P({resizeBilinear_:WB});function $B(e,t,n=!1){const s=W(e,"images","resizeNearestNeighbor");k(s.rank===3||s.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),k(s.dtype==="float32"||s.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,u={images:i},p={alignCorners:n,size:t},m=(b,w)=>(w([i]),b.resizeNearestNeighbor(i,a,c,n)),y=V.runKernelFunc(m,u,null,ny,p);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const JA=P({resizeNearestNeighbor_:$B});function UB(e,t,n){k(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),k(n%1===0,()=>`bandPart(): numUpper must be an integer, got ${n}.`);const s=W(e,"a","bandPart");k(s.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`);const i=s.shape,[o,a]=s.shape.slice(-2);if(!(t<=o))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${o}).`);if(!(n<=a))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the 
number of columns (${a}).`);t<0&&(t=o),n<0&&(n=a);const c=K(yh(0,o,1,"int32"),[-1,1]),u=yh(0,a,1,"int32"),p=Ce(c,u),m=Bs(Mr(p,Ne(+t,"int32")),tr(p,Ne(-n,"int32"))),y=ct([o,a],s.dtype);return K(as(_i(K(s,[-1,o,a])).map(b=>$n(m,b,y))),i)}const BB=P({bandPart_:UB});function MB(e){let t;if(Array.isArray(e)){t=!1,k(e!=null&&e.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const i=e[0].shape[0];for(let o=1;o`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[o].shape[0]} vs. ${i})`)}else t=!0,e=os(e,e.shape[0],0).map(i=>zr(i,[0]));k(e.length<=e[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`);const n=[],s=e;for(let i=0;i{let o=s[i];if(i>0)for(let a=0;a=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`),e.rank===2)return ZA(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce((u,p)=>u*p),s=_i(K(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),i=[],o=[];s.forEach(u=>{const[p,m]=ZA(u,t);i.push(p),o.push(m)});const a=K(as(i,0),e.shape),c=K(as(o,0),e.shape);return[a,c]}}function ZA(e,t=!1){return V.tidy(()=>{k(e.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`);const n=e.shape[0],s=e.shape[1];let i=Kd(n),o=Wr(e);const a=Gr([[1]],[1,1]);let c=Wr(a);const u=n>=s?s:n;for(let p=0;p{const w=st(o,[p,p],[n-p,1]),I=pp(w),T=st(o,[p,p],[1,1]),v=$n(Ts(T,0),Gr([[-1]]),Gr([[1]])),N=Ce(T,X(v,I)),E=_e(w,N);E.shape[0]===1?c=Wr(a):c=Mt([a,st(E,[1,0],[E.shape[0]-1,E.shape[1]])],0);const D=Pt(_e(at(v,N),I)),F=st(o,[p,0],[n-p,s]),_=X(D,c),B=Pe(c);if(p===0)o=Ce(F,at(_,at(B,F)));else{const q=Ce(F,at(_,at(B,F)));o=Mt([st(o,[0,0],[p,s]),q],0)}const U=Pe(_),Y=st(i,[0,p],[n,i.shape[1]-p]);if(p===0)i=Ce(Y,at(at(Y,c),U));else{const q=Ce(Y,at(at(Y,c),U));i=Mt([st(i,[0,0],[n,p]),q],1)}return[c,o,i]}),qe([m,y,b])}return!t&&n>s&&(i=st(i,[0,0],[n,s]),o=st(o,[0,0],[s,s])),[i,o]})}const GB=P({qr_:zB});(function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(r.Reduction||(r.Reduction={}));function VB(e,t,n=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const s=W(e,"losses","computeWeightedLoss");let i=null;t!=null&&(i=W(t,"weights","computeWeightedLoss"));const o=i==null?s:X(s,i);if(n===r.Reduction.NONE)return o;if(n===r.Reduction.SUM)return Ue(o);if(n===r.Reduction.MEAN){if(i==null)return zt(o);{const a=s.size/i.size,c=_e(Ue(o),Ue(i));return a>1?_e(c,Ne(a)):c}}if(n===r.Reduction.SUM_BY_NONZERO_WEIGHTS){if(i==null)return _e(Ue(o),Ne(s.size));{const a=X(i,si(s.shape)),c=ve(Ue(Pr(a,Ne(0))),"float32");return _e(Ue(o),c)}}throw Error(`Unknown reduction: ${n}`)}const nr=P({computeWeightedLoss_:VB});function HB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","absoluteDifference"),o=W(t,"predictions","absoluteDifference");let a=null;n!=null&&(a=W(n,"weights","absoluteDifference")),dt(i.shape,o.shape,"Error in absoluteDifference: ");const c=rn(Ce(i,o));return nr(c,a,s)}const YB=P({absoluteDifference_:HB});function qB(e,t,n,s,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","cosineDistance"),a=W(t,"predictions","cosineDistance");let c=null;s!=null&&(c=W(s,"weights","cosineDistance")),dt(o.shape,a.shape,"Error in cosineDistance: ");const u=Ne(1),p=Ce(u,Ue(X(o,a),n,!0));return nr(p,c,i)}const jB=P({cosineDistance_:qB});function KB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let i=W(e,"labels","hingeLoss");const o=W(t,"predictions","hingeLoss");let 
a=null;n!=null&&(a=W(n,"weights","hingeLoss")),dt(i.shape,o.shape,"Error in hingeLoss: ");const c=Ne(1);i=Ce(X(Ne(2),i),c);const u=Fi(Ce(c,X(i,o)));return nr(u,a,s)}const XB=P({hingeLoss_:KB});function JB(e,t,n,s=1,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","huberLoss"),a=W(t,"predictions","huberLoss");let c=null;n!=null&&(c=W(n,"weights","huberLoss")),dt(o.shape,a.shape,"Error in huberLoss: ");const u=Ne(s),p=rn(Ce(a,o)),m=ko(p,u),y=Ce(p,m),b=be(X(Ne(.5),Lt(m)),X(u,y));return nr(b,c,i)}const ZB=P({huberLoss_:JB});function QB(e,t,n,s=1e-7,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","logLoss"),a=W(t,"predictions","logLoss");let c=null;n!=null&&(c=W(n,"weights","logLoss")),dt(o.shape,a.shape,"Error in logLoss: ");const u=Ne(1),p=Ne(s),m=Pt(X(o,is(be(a,p)))),y=X(Ce(u,o),is(be(Ce(u,a),p))),b=Ce(m,y);return nr(b,c,i)}const eM=P({logLoss_:QB});function tM(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","meanSquaredError"),o=W(t,"predictions","meanSquaredError");let a=null;n!=null&&(a=W(n,"weights","meanSquaredError")),dt(i.shape,o.shape,"Error in meanSquaredError: ");const c=Sh(i,o);return nr(c,a,s)}const nM=P({meanSquaredError_:tM});function sM(e,t){const n=W(e,"labels","sigmoidCrossEntropyWithLogits"),s=W(t,"logits","sigmoidCrossEntropyWithLogits");dt(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const i=Fi(s),o=X(s,n),a=Jd(xs(Pt(rn(s))));return be(Ce(i,o),a)}function iM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"multiClassLabels","sigmoidCrossEntropy");const a=W(t,"logits","sigmoidCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","sigmoidCrossEntropy")),dt(o.shape,a.shape,"Error in sigmoidCrossEntropy: "),s>0){const p=Ne(s),m=Ne(1),y=Ne(.5);o=be(X(o,Ce(m,p)),X(y,p))}const u=sM(o,a);return nr(u,c,i)}const rM=P({sigmoidCrossEntropy_:iM});function oM(e,t,n=-1){if(n===-1&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);const s=Di((i,o,a)=>{const c=!0,u=vb(o,[n],c),p=Ce(ve(o,"float32"),u);a([i,p]);const m=Pt(X(p,i)),y=Ue(m,[n]),b=(w,I)=>{const[T,v]=I,N=En(w.shape,[n]);return[X(K(w,N),Ce(ve(T,"float32"),xs(v))),X(K(w,N),Ce(xs(v),ve(T,"float32")))]};return{value:y,gradFunc:b}});return s(e,t)}function aM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"onehotLabels","softmaxCrossEntropy");const a=W(t,"logits","softmaxCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","softmaxCrossEntropy")),dt(o.shape,a.shape,"Error in softmaxCrossEntropy: "),s>0){const p=Ne(s),m=Ne(1),y=Ne(o.shape[1]);o=be(X(o,Ce(m,p)),_e(p,y))}const u=oM(o,a);return nr(u,c,i)}const cM=P({softmaxCrossEntropy_:aM});const lM={fft:wh,ifft:ja,rfft:Lh,irfft:hp},hM={hammingWindow:aB,hannWindow:qA,frame:jA,stft:uB},Vr={flipLeftRight:fB,resizeNearestNeighbor:JA,resizeBilinear:XA,rotateWithOffset:yB,cropAndResize:pB,nonMaxSuppression:wB,nonMaxSuppressionAsync:NB,nonMaxSuppressionWithScore:RB,nonMaxSuppressionWithScoreAsync:EB,nonMaxSuppressionPadded:kB,nonMaxSuppressionPaddedAsync:_B},QA={bandPart:BB,gramSchmidt:PB,qr:GB},uM={absoluteDifference:YB,computeWeightedLoss:nr,cosineDistance:jB,hingeLoss:XB,huberLoss:ZB,logLoss:eM,meanSquaredError:nM,sigmoidCrossEntropy:rM,softmaxCrossEntropy:cM};class sr extends No{minimize(e,t=!1,n){const{value:s,grads:i}=this.computeGradients(e,n);if(n!=null){const o=n.map(a=>({name:a.name,tensor:i[a.name]}));this.applyGradients(o)}else this.applyGradients(i);return qe(i),t?s:(s.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return Ab(e,t)}dispose(){this.iterations_!=null&&qe(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Ne(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(sr,Symbol.hasInstance,{value:e=>e.minimize!=null&&e.computeGradients!=null&&e.applyGradients!=null});class xh extends sr{constructor(e,t,n=null){super();this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],n==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedGrads[s]==null&&(this.accumulatedGrads[s]={originalName:`${n}/accum_grad`,variable:ee(()=>et(i).variable(o))}),this.accumulatedUpdates[s]==null&&(this.accumulatedUpdates[s]={originalName:`${n}/accum_var`,variable:ee(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedGrads[s].variable,u=this.accumulatedUpdates[s].variable;ee(()=>{const p=be(X(c,this.rho),X(Lt(a),1-this.rho)),m=X(_e(Sn(be(u,this.epsilon)),Sn(be(c,this.epsilon))),a),y=be(X(u,this.rho),X(Lt(m),1-this.rho));c.assign(p),u.assign(y);const b=be(X(m,-this.learningRate),i);i.assign(b)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(qe(this.accumulatedGrads.map(e=>e.variable)),qe(this.accumulatedUpdates.map(e=>e.variable)))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=e.length/2,n=!1;this.accumulatedGrads=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedUpdates=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}xh.className="Adadelta",ge(xh);class Th extends sr{constructor(e,t=.1){super();this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n];if(this.accumulatedGrads[s]==null){const c=!1;this.accumulatedGrads[s]={originalName:`${n}/accumulator`,variable:ee(()=>hh(i.shape,this.initialAccumulatorValue).variable(c))}}const o=Array.isArray(e)?e[s].tensor:e[n];if(o==null)return;const a=this.accumulatedGrads[s].variable;ee(()=>{const c=be(a,Lt(o));a.assign(c);const u=be(X(_e(o,Sn(be(c,V.backend.epsilon()))),-this.learningRate),i);i.assign(u)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&qe(this.accumulatedGrads.map(e=>e.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulatedGrads=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}Th.className="Adagrad",ge(Th);class Ah extends sr{constructor(e,t,n,s=null){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],ee(()=>{this.accBeta1=Ne(t).variable(),this.accBeta2=Ne(n).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=Ce(1,this.accBeta2);t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:ee(()=>et(a).variable(c))}),this.accumulatedSecondMoment[o]==null&&(this.accumulatedSecondMoment[o]={originalName:`${i}/v`,variable:ee(()=>et(a).variable(c))});const u=Array.isArray(e)?e[o].tensor:e[i];if(u==null)return;const p=this.accumulatedFirstMoment[o].variable,m=this.accumulatedSecondMoment[o].variable,y=be(X(p,this.beta1),X(u,1-this.beta1)),b=be(X(m,this.beta2),X(Lt(u),1-this.beta2)),w=_e(y,n),I=_e(b,s);p.assign(y),m.assign(b);const T=be(X(_e(w,be(Sn(I),this.epsilon)),-this.learningRate),a);a.assign(T)}),this.accBeta1.assign(X(this.accBeta1,this.beta1)),this.accBeta2.assign(X(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&qe(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedSecondMoment!=null&&qe(this.accumulatedSecondMoment.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await 
this.extractIterations(e),ee(()=>{this.accBeta1.assign(ii(this.beta1,this.iterations_+1)),this.accBeta2.assign(ii(this.beta2,this.iterations_+1))});const t=e.length/2,n=!1;this.accumulatedFirstMoment=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedSecondMoment=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}Ah.className="Adam",ge(Ah);class vh extends sr{constructor(e,t,n,s=null,i=0){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=i,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],ee(()=>{this.iteration=Ne(0).variable(),this.accBeta1=Ne(t).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=_e(-this.learningRate,be(X(this.iteration,this.decay),1));t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:et(a).variable(c)}),this.accumulatedWeightedInfNorm[o]==null&&(this.accumulatedWeightedInfNorm[o]={originalName:`${i}/v`,variable:et(a).variable(c)});const u=Array.isArray(e)?e[o].tensor:e[i];if(u==null)return;const p=this.accumulatedFirstMoment[o].variable,m=this.accumulatedWeightedInfNorm[o].variable,y=be(X(p,this.beta1),X(u,1-this.beta1)),b=X(m,this.beta2),w=rn(u),I=Us(b,w);p.assign(y),m.assign(I);const T=be(X(_e(s,n),_e(y,be(I,this.epsilon))),a);a.assign(T)}),this.iteration.assign(be(this.iteration,1)),this.accBeta1.assign(X(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&qe(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedWeightedInfNorm!=null&&qe(this.accumulatedWeightedInfNorm.map(e=>e.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}vh.className="Adamax",ge(vh);class Za extends sr{constructor(e){super();this.learningRate=e,this.setLearningRate(e)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=Array.isArray(e)?e[s].tensor:e[n];if(i==null)return;const o=V.registeredVariables[n];ee(()=>{const a=be(X(this.c,i),o);o.assign(a)})}),this.incrementIterations()}setLearningRate(e){this.learningRate=e,this.c!=null&&this.c.dispose(),this.c=Rn(Ne(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(e=await this.extractIterations(e),e.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Za.className="SGD",ge(Za);class Nh extends Za{constructor(e,t,n=!1){super(e);this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Ne(this.momentum)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const 
i=V.registeredVariables[n];if(this.accumulations[s]==null){const c=!1;this.accumulations[s]={originalName:`${n}/momentum`,variable:ee(()=>et(i).variable(c))}}const o=this.accumulations[s].variable,a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;ee(()=>{let c;const u=be(X(this.m,o),a);this.useNesterov?c=be(X(this.c,be(a,X(u,this.m))),i):c=be(X(this.c,u),i),o.assign(u),i.assign(c)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&qe(this.accumulations.map(e=>e.variable))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulations=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}Nh.className="Momentum",ge(Nh);class Ch extends sr{constructor(e,t=.9,n=0,s=null,i=!1){super();if(this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=i,s==null&&(this.epsilon=V.backend.epsilon()),e==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedMeanSquares[s]==null&&(this.accumulatedMeanSquares[s]={originalName:`${n}/rms`,variable:ee(()=>et(i).variable(o))}),this.accumulatedMoments[s]==null&&(this.accumulatedMoments[s]={originalName:`${n}/momentum`,variable:ee(()=>et(i).variable(o))}),this.accumulatedMeanGrads[s]==null&&this.centered&&(this.accumulatedMeanGrads[s]={originalName:`${n}/mg`,variable:ee(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedMeanSquares[s].variable,u=this.accumulatedMoments[s].variable;ee(()=>{const p=be(X(c,this.decay),X(Lt(a),1-this.decay));if(this.centered){const m=this.accumulatedMeanGrads[s].variable,y=be(X(m,this.decay),X(a,1-this.decay)),b=_e(X(a,this.learningRate),Sn(Ce(p,be(Lt(y),this.epsilon)))),w=be(X(u,this.momentum),b);c.assign(p),m.assign(y),u.assign(w);const I=Ce(i,w);i.assign(I)}else{const m=be(X(c,this.decay),X(Lt(a),1-this.decay)),y=be(X(u,this.momentum),_e(X(a,this.learningRate),Sn(be(m,this.epsilon))));c.assign(m),u.assign(y);const b=Ce(i,y);i.assign(b)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&qe(this.accumulatedMeanSquares.map(e=>e.variable)),this.accumulatedMeanGrads!=null&&this.centered&&qe(this.accumulatedMeanGrads.map(e=>e.variable)),this.accumulatedMoments!=null&&qe(this.accumulatedMoments.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const 
t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedMoments=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.centered&&(this.accumulatedMeanGrads=e.slice(t*2,t*3).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}Ch.className="RMSProp",ge(Ch);class Bo{static sgd(e){return new Za(e)}static momentum(e,t,n=!1){return new Nh(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,i=!1){return new Ch(e,t,n,s,i)}static adam(e=.001,t=.9,n=.999,s=null){return new Ah(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new xh(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,i=0){return new vh(e,t,n,s,i)}static adagrad(e,t=.1){return new Th(e,t)}}const Mo={sgd:Bo.sgd,momentum:Bo.momentum,adadelta:Bo.adadelta,adagrad:Bo.adagrad,rmsprop:Bo.rmsprop,adamax:Bo.adamax,adam:Bo.adam};const dM=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:e=>e())();function Ip(){return new Promise(e=>dM(()=>e()))}function qb(e,t,n){const s=n*(typeof e=="number"?e:e[0]),i=t*(typeof e=="number"?e:e[1]);return[s,i]}function Rh(e,t,n,s=!0){let i=[];if(s)i=i.concat(t.slice(0)),i.push(e[0]/n),i=i.concat(e.slice(1));else{i=i.concat(e[0]);const o=t.length;for(let a=0;a=t*2+1||a%2===1?o.push(a):i.push(a);s.push(...i),s.push(0),s.push(...o)}return s}function Eh(e,t,n,s=!0){const i=[];s?i.push(e[0]/n):i.push(e[0]*n);for(let o=1;o{const a=[...i];a[n]=o;const c=st(e,s,a);return s[n]+=o,c})}function ow(e,t){const n=new Array(e.rank);for(let i=0;iD.value-E.value);const T=y*s,v=u.subarray(T,T+s),N=p.subarray(T,T+s);for(let E=0;E{const[n]=t;return{x:()=>X(e,Ka(ve(n,"float32"),-1))}}};const gM={kernelName:de,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Lt(ve(n,"float32")),i=Sn(Ce(Ne(1),s));return Pt(_e(e,i))}}}};const yM={kernelName:Ae,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Sn(Ce(Lt(ve(n,"float32")),1));return _e(e,s)}}}};const bM={kernelName:xe,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const u=an(n.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,n.shape)},a=()=>{let c=e;const u=an(s.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,s.shape)};return{a:o,b:a}}};const wM={kernelName:Me,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach((s,i)=>{n[i]=()=>e.clone()}),n}};const LM={kernelName:$t,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const SM={kernelName:Kt,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const IM={kernelName:Fn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Sn(Ce(Ne(1),Lt(ve(n,"float32")))))}}};const xM={kernelName:vn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Sn(be(Ne(1),Lt(ve(n,"float32"))));return _e(e,s)}}}};const TM={kernelName:Ai,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=be(Lt(n),Lt(s));let u=X(e,_e(s,c));const p=an(n.shape,i);return p.length>0&&(u=Ue(u,p)),K(u,n.shape)},a=()=>{const c=be(Lt(n),Lt(s));let u=Pt(X(e,_e(n,c)));const p=an(s.shape,i);return p.length>0&&(u=Ue(u,p)),K(u,s.shape)};return{a:o,b:a}}};const 
AM={kernelName:Nn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(Lt(ve(n,"float32")),1))}}};const vM={kernelName:Qs,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Ce(Ne(1),Lt(ve(n,"float32"))))}}};function NM(e,t,n,s,i=[1,1,1],o,a){const c=W(e,"dy","avgPool3dBackprop"),u=W(t,"input","avgPool3dBackprop");let p=c,m=u,y=!1;u.rank===4&&(y=!0,p=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]]),m=K(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]])),k(p.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${p.rank}.`),k(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),k(on(s,i),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${s} and dilations '${i}'`),a!=null&&k(Ut(o),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const b=v=>{const N=sh(m.shape,n,s,i,o,a);return v.avgPool3dBackprop(p,m,N)},w={dy:p,input:m},I={filterSize:n,strides:s,dilations:i,pad:o,dimRoundingMode:a},T=V.runKernelFunc(b,w,null,Cx,I);return y?K(T,[T.shape[1],T.shape[2],T.shape[3],T.shape[4]]):T}const CM=P({avgPool3dBackprop_:NM});const RM={kernelName:hl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,dilations:a,pad:c,dimRoundingMode:u}=n,p=a==null?[1,1,1]:a;return{x:()=>CM(e,s,i,o,p,c,u)}}};function OM(e,t,n,s,i){const o=W(e,"dy","avgPoolBackprop"),a=W(t,"input","avgPoolBackprop");k(a.rank===o.rank,()=>`Rank of input (${a.rank}) does not match rank of dy (${o.rank})`);let c=a,u=o,p=!1;a.rank===3&&(p=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),u=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(u.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${u.rank}.`),k(c.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${c.rank}.`);const m=I=>{const T=Wn(c.shape,n,s,1,i);return I.avgPoolBackprop(u,c,T)},y={dy:u,input:c},b={filterSize:n,strides:s,pad:i},w=V.runKernelFunc(m,y,null,xa,b);return p?K(w,[w.shape[1],w.shape[2],w.shape[3]]):w}const EM=P({avgPoolBackprop_:OM});const DM={kernelName:ei,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,pad:a}=n;return{x:()=>EM(e,s,i,o,a)}}};const kM={kernelName:vg,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,i]=t,{transposeA:o,transposeB:a}=n;return!o&&!a?{a:()=>at(e,i,!1,!0),b:()=>at(s,e,!0,!1)}:!o&&a?{a:()=>at(e,i,!1,!1),b:()=>at(e,s,!0,!1)}:o&&!a?{a:()=>at(i,e,!1,!0),b:()=>at(s,e,!1,!1)}:{a:()=>at(i,e,!0,!0),b:()=>at(e,s,!0,!0)}}};const FM={kernelName:Ng,gradFunc:(e,t,n)=>{const{blockShape:s,crops:i}=n;return{x:()=>fh(e,s,i)}}};const _M={kernelName:Cg,gradFunc:(e,t,n)=>{const s=n,i=s.inputShape,o=s.shape,a=Array.from(o);for(let u=i.length-1;u>=0;u--)if(i[u]===o[u])a[u]=1;else if(i[u]!==1)throw new Error(`broadcastTo(): [${i}] cannot be broadcast to [${o}].`);const c=[];for(let u=0;u1&&c.push(u);return{x:()=>Ue(e,c,!0)}}};const WM={kernelName:ul,gradFunc:e=>({x:()=>e.clone()})};const $M={kernelName:dl,gradFunc:e=>({x:()=>et(e)})};const UM={kernelName:pl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:i,clipValueMax:o}=n;return{x:()=>$n(Bs(tr(s,i),Mr(s,o)),e,et(e))}}};const BM={kernelName:td,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map(u=>u.shape),{axis:i}=n,o=ft(i,t[0].shape)[0],a=s.map(u=>u[o]),c=os(e,a,o);return c.map(u=>()=>u)}};const MM={kernelName:Og,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{dilations:o,strides:a,pad:c,dataFormat:u}=n;return k($r(o),()=>`Error 
in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${o}'`),{x:()=>gb(s.shape,e,i,a,c,u),filter:()=>Vb(s,e,i.shape,a,c,u)}}};const PM={kernelName:Eg,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{strides:o,pad:a,dataFormat:c,dimRoundingMode:u}=n;return{dy:()=>er(e,i,o,a,c,1,u),filter:()=>Vb(e,s,i.shape,o,a,c,u)}}};function zM(e,t,n,s,i){let o=e;e.rank===4&&(o=K(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let a=t;a.rank===4&&(a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),k(o.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${o.shape}.`),k(a.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${a.shape}.`),k(n.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`),k(o.shape[4]===n[3],()=>`Error in conv3dDerFilter: depth of input ${o.shape[4]}) must match input depth in filter (${n[3]}.`),k(a.shape[4]===n[4],()=>`Error in conv3dDerFilter: depth of dy (${a.shape[4]}) must match output depth for filter (${n[4]}).`);const c=m=>{const y=1,b=ih(o.shape,n,s,y,i);return m.conv3dDerFilter(o,a,b)},u={x:o,y:a},p={strides:s,pad:i};return V.runKernelFunc(c,u,null,Ox,p)}const GM=P({conv3DBackpropFilter_:zM});const VM={kernelName:Dg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o}=n;k($r(s),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);const[a,c]=t;return{x:()=>lA(a.shape,e,c,i,o),filter:()=>GM(a,e,c.shape,i,o)}}};const HM={kernelName:Ta,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Pt(op(ve(n,"float32"))),e)}}};const YM={kernelName:ml,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(ap(ve(n,"float32")),e)}}};const qM={kernelName:kg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:i,exclusive:o,reverse:a}=n;return{x:()=>{const c=_n([i],s.rank);let u=jd(e,i,o,!a);return c!=null&&(u=Pe(u,c)),u}}}};const jM={kernelName:Fg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o,dimRoundingMode:a}=n,c=s==null?[1,1]:s;k($r(c),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${c}'`);const[u,p]=t;k(u.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${u.rank}.`),k(p.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${p.rank}.`),k(u.shape[3]===p.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${u.shape[3]}) must match the inChannels dimension in filter ${p.shape[2]}.`),k(on(i,c),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'.`),a!=null&&k(Ut(o),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=Oi(u.shape,p.shape,i,c,o,a,!0);return{x:()=>HA(u.shape,e,p,m),filter:()=>VA(u,e,p.shape,m)}}};const KM={kernelName:nd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,o={x:s,filter:i,dy:e},a={x:s,filter:i,dy:e};return{x:()=>V.runKernel(sd,o,n),filter:()=>V.runKernel(id,a,n)}}};const XM={kernelName:Aa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),u=an(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const u=an(s.shape,i);u.length>0&&(c=K(Ue(c,u),s.shape));const p=Lt(s);return Pt(_e(c,ve(p,"float32")))};return{a:o,b:a}}};const JM={kernelName:fl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s=o=>o.eluDer(e,n),i={dy:e,y:n};return{x:()=>V.runKernelFunc(s,i,null,$x)}}};const ZM={kernelName:gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(xs(Pt(Lt(n))),2/Math.sqrt(Math.PI));return{x:()=>X(e,s)}}};const QM={kernelName:yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,n)}}};const eP={kernelName:bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,xs(n))}}};const tP={kernelName:wl,gradFunc:e=>({x:()=>et(e)})};const nP={kernelName:Wg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),u=an(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const u=an(s.shape,i);u.length>0&&(c=K(Ue(c,u),s.shape));const p=Lt(s);return Pt(_e(c,ve(p,"float32")))};return{a:o,b:a}}};const sP={kernelName:Ll,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[i,o,a,c]=t,u=c==null?Ne(1):c,p=an(o.shape,i.shape),m=[];if(o.rank===1){for(let F=0;Fo.rank===1?K(X(X(e,Br(K(w,[1,1,1,o.shape[0]]),m)),u),i.shape):K(X(X(e,w),u),i.shape),v=()=>{let F=X(X(w,Ne(-1)),b);return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)},N=()=>{let F=X(X(I,y),b);return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)},E=()=>{const F=X(y,w);let _=X(e,F);return o.rank===1&&(_=Ue(_,p)),K(_,o.shape)},D=()=>{let F=e;return o.rank===1&&(F=Ue(F,p)),K(F,o.shape)};return{x:T,mean:v,variance:N,scale:E,offset:D}}};const iP={kernelName:$g,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,i]=t,{axis:o}=n,a=ft(o,s.shape)[0],c=()=>{const u=s.shape,p=i.size,m=u.slice(0,a),y=m.length,b=u.slice(o,u.length).slice(1),w=b.length,I=cv(0,y),T=cv(y+1,y+1+w),v=lv([m,[p],b]),N=K(e,v),E=K(i,[p]),D=lv([[y],I,T]),F=Pe(N,D);let _=Mb(F,E,s.shape[a]);const B=eh(D);return _=Pe(_,B),_};return{x:c,indices:()=>i}}};function cv(e,t){const n=[];for(let s=e;s{const[n,s]=t;return{a:()=>et(n),b:()=>et(s)}}};const oP={kernelName:Sl,gradFunc:e=>({x:()=>ve(e,"float32")})};const aP={kernelName:Il,gradFunc:e=>({x:()=>et(e)})};const cP={kernelName:xl,gradFunc:e=>({x:()=>et(e)})};const lP={kernelName:Tl,gradFunc:e=>({x:()=>et(e)})};const hP={kernelName:vl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(n,1))}}};const uP={kernelName:Al,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,ve(n,"float32"))}}};const dP={kernelName:Pg,inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:i}=n;return{logits:()=>{const o=!0,a=xs(s);return Ce(e,X(Ue(e,i,o),a))}}}};function pP(e,t,n,s=5,i=1,o=1,a=.5){const c=m=>m.LRNGrad(n,e,t,s,i,o,a),u={x:e,y:t,dy:n},p={depthRadius:s,bias:i,alpha:o,beta:a};return 
V.runKernelFunc(c,u,null,qx,p)}const mP=P({localResponseNormalizationBackprop_:pP});const fP={kernelName:zg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{depthRadius:o,bias:a,alpha:c,beta:u}=n;return{x:()=>mP(s,i,e,o,a,c,u)}}};function hv(e,t,n,s,i){return t.rank{const o=X(e,ve(ni(n,t),e.dtype));return i==null?o:Pe(o,i)}}}const uv={kernelName:Nl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:i}=s,[o,a]=t,c=ft(i,o.shape),u=_n(c,o.rank),p=hv(e,a,o,c,u);return{x:()=>{let m=p.x();return u!=null&&(m=Pe(m)),m}}}};const gP={kernelName:Gg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(tr(n,s),"float32")),o=()=>X(e,ve(dh(n,s),"float32"));return{a:i,b:o}}};function yP(e,t,n,s,i,o=[1,1,1],a,c){const u=W(e,"dy","maxPool3dBackprop"),p=W(t,"input","maxPool3dBackprop"),m=W(n,"output","maxPool3dBackprop");let y=u,b=p,w=m,I=!1;p.rank===4&&(I=!0,y=K(u,[1,u.shape[0],u.shape[1],u.shape[2],u.shape[3]]),b=K(p,[1,p.shape[0],p.shape[1],p.shape[2],p.shape[3]]),w=K(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),k(y.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${y.rank}.`),k(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),k(w.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${w.rank}.`),k(on(i,o),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${i} and dilations '${o}'`),c!=null&&k(Ut(a),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${a}.`);const T=D=>{const F=sh(b.shape,s,i,o,a,c);return D.maxPool3dBackprop(y,b,w,F)},v={dy:y,input:b,output:w},N={filterSize:s,strides:i,dilations:o,pad:a,dimRoundingMode:c},E=V.runKernelFunc(T,v,null,jx,N);return I?K(E,[E.shape[1],E.shape[2],E.shape[3],E.shape[4]]):E}const bP=P({maxPool3dBackprop_:yP});const wP={kernelName:Vg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,dilations:c,pad:u,dimRoundingMode:p}=n,m=c==null?[1,1,1]:c;return{x:()=>bP(e,s,i,o,a,m,u,p)}}};function LP(e,t,n,s,i,o,a){const c=W(e,"dy","maxPoolBackprop"),u=W(t,"input","maxPoolBackprop"),p=W(n,"output","maxPoolBackprop");k(u.rank===c.rank,()=>`Rank of input (${u.rank}) does not match rank of dy (${c.rank})`),k(c.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${c.rank}.`),k(u.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${u.rank}.`),a!=null&&k(Ut(o),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=w=>{const I=Wn(u.shape,s,i,1,o,a);return w.maxPoolBackprop(c,u,p,I)},y={dy:c,input:u,output:p},b={filterSize:s,strides:i,pad:o,dimRoundingMode:a};return V.runKernelFunc(m,y,null,ad,b)}const SP=P({maxPoolBackprop_:LP});const IP={kernelName:Cl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,pad:c}=n;return{x:()=>SP(e,s,i,o,a,c)}}};const xP={kernelName:Hg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:i}=s,[o,a]=t,c=ft(i,o.shape),u=_n(c,o.rank),p=hv(e,a,o,c,u);return{x:()=>{let m=p.x();return u!=null&&(m=Pe(m)),m}}}};const TP={kernelName:Yg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(Mr(n,s),"float32")),o=()=>X(e,ve(Ts(n,s),"float32"));return{a:i,b:o}}};const AP={kernelName:qg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=an(n.shape,i);return 
c.length>0?K(Ue(e,c),n.shape):e},a=()=>{const c=X(e,Pt(Pa(_e(n,s)))),u=an(s.shape,i);return u.length>0?K(Ue(c,u),s.shape):c};return{a:o,b:a}}};const vP={kernelName:Rl,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=X(e,ve(s,"float32")),u=an(n.shape,i);return u.length>0?K(Ue(c,u),n.shape):c},a=()=>{const c=X(e,ve(n,"float32")),u=an(s.shape,i);return u.length>0?K(Ue(c,u),s.shape):c};return{a:o,b:a}}};const NP={kernelName:jg,gradFunc:e=>({x:()=>Pt(e)})};const CP={kernelName:Jg,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>ct(n.shape,"float32")}}};const RP={kernelName:Xg,gradFunc:e=>({x:()=>et(e)})};const dv={kernelName:dd,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>st(e,o,s.shape)}}};const OP={kernelName:Zg,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,i]=t,o=n,a=s,c=nt(o.shape,a.shape),u=()=>{const m=ve(a,"float32");let y=X(e,X(m,ii(o,Ce(m,Ne(1)))));const b=an(o.shape,c);return b.length>0&&(y=Ue(y,b)),K(y,o.shape)},p=()=>{const m=Ts(o,0),y=$n(m,is(o),et(o));let b=X(e,X(i,y));const w=an(a.shape,c);return w.length>0&&(b=Ue(b,w)),K(b,a.shape)};return{a:u,b:p}}};const EP={kernelName:Qg,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,i=Ts(n,0);return{x:()=>$n(i,e,X(e,s)),alpha:()=>{let o=$n(i,et(e),X(e,n));const a=an(s.shape,e.shape);return a.length>0&&(o=Ue(o,a)),K(o,s.shape)}}}};const DP={kernelName:Ol,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Pt(Lt(n)))}}};const kP={kernelName:iy,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Mr(n,6),Ka(n));return{x:()=>X(e,ve(s,"float32"))}}};const FP={kernelName:ty,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,ve(Ka(n),"float32"))}}};const _P={kernelName:El,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>K(e,n.shape)}}};const WP={kernelName:sy,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:u}=n;return c.resizeBilinearBackprop(e,s,u)},o={images:s},a=()=>V.runKernelFunc(i,o,null,Zx,n);return{images:a}}};const $P={kernelName:ny,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:u}=n;return c.resizeNearestNeighborBackprop(e,s,u)},o={images:s},a=()=>V.runKernelFunc(i,o,null,Jx,n);return{images:a}}};const UP={kernelName:ry,gradFunc:(e,t,n)=>{const{dims:s}=n,i=ft(s,e.shape);return{x:()=>As(e,i)}}};const BP={kernelName:Dl,gradFunc:e=>({x:()=>et(e)})};const MP={kernelName:kl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Pt(_e(e,X(ii(n,1.5),2)))}}};const PP={kernelName:oy,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>ve(et(n),"float32"),t:()=>X(e,ve(n,e.dtype)),e:()=>X(e,ve(ph(n),e.dtype))}}};const zP={kernelName:Fl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Ts(n,Ne(0)),i=Ne(xp),o=Ne(Tp),a=X(e,o),c=X(X(e,i),xs(ve(n,"float32")));return $n(s,a,c)}}}};const GP={kernelName:$l,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(n,Ce(Ne(1),n)))}}};const VP={kernelName:Wl,gradFunc:e=>({x:()=>et(e)})};const HP={kernelName:va,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(lh(ve(n,"float32")),e)}}};const YP={kernelName:_l,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(qd(ve(n,"float32")),e)}}};const qP={kernelName:pd,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:i,size:o}=n,a=s.shape,[c,u]=$d(s,i,o),p=[];for(let m=0;mki(e,p)}}};const 
jP={kernelName:ly,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:i}=n,o=!0,a=X(e,s);return{logits:()=>Ce(a,X(Ue(a,[i],o),s))}}};const KP={kernelName:Ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ei(n))}}};const pv={kernelName:md,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:i}=n;return{x:()=>ah(e,s,i)}}};const mv={kernelName:cy,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>Mt(e,s)}}};const XP={kernelName:Bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,X(Sn(ve(n,"float32")),2))}}};const JP={kernelName:fd,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(ve(n,"float32"),2))}}};const ZP={kernelName:Na,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=Ne(2),o=()=>X(e,X(i,Ce(n,s))),a=()=>X(e,X(i,Ce(s,n)));return{a:o,b:a}}};const QP={kernelName:Gl,gradFunc:e=>({x:()=>et(e)})};const ez={kernelName:Ml,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const u=an(n.shape,i);return u.length>0&&(c=Ue(c,u)),K(c,n.shape)},a=()=>{let c=e;const u=an(s.shape,i);return u.length>0&&(c=Ue(c,u)),K(Pt(c),s.shape)};return{a:o,b:a}}};const tz={kernelName:ay,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,i=s.shape.slice(),{axis:o}=n,a=ft(o,s.shape);a.forEach(p=>{i[p]=1});const c=K(e,i),u=X(c,si(s.shape,"float32"));return{x:()=>u}}};const nz={kernelName:Ca,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Lt(lh(n)))}}};const sz={kernelName:Pl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Ce(Ne(1),Lt(n)),e)}}};const iz={kernelName:hy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:i}=n,o=()=>{let a=et(s);if(s.rank===1)for(let c=0;c{const s=n,{perm:i}=s,o=eh(i);return{x:()=>Pe(e,o)}}};const oz={kernelName:uy,gradFunc:(e,t,n)=>{const s=n,{axis:i}=s;return{value:()=>as(e,i)}}};const az={kernelName:dy,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t,s=()=>cz(e,n);return{x:s}}};function cz(e,t){const n=Us(t,et(t)),s=za(e,n);let i=tr(t,Ne(0,"int32"));const o=s.rank-i.rank;for(let c=0;c({x:()=>et(e)})};const hz=[fM,gM,yM,bM,wM,LM,SM,IM,xM,TM,AM,vM,RM,DM,kM,FM,_M,WM,$M,UM,BM,PM,MM,VM,HM,YM,qM,jM,KM,XM,JM,ZM,QM,eP,nP,tP,sP,iP,rP,oP,aP,cP,lP,hP,uP,dP,fP,uv,uv,gP,wP,IP,xP,TP,AP,vP,NP,CP,RP,dv,dv,OP,EP,DP,kP,FP,_P,WP,$P,UP,BP,MP,PP,zP,GP,VP,HP,YP,qP,jP,KP,pv,pv,mv,mv,XP,ZP,JP,QP,ez,tz,nz,sz,iz,rz,oz,az,lz];for(const e of hz)sT(e);Q.prototype.abs=function(){return this.throwIfDisposed(),rn(this)};Q.prototype.acos=function(){return this.throwIfDisposed(),nb(this)};Q.prototype.acosh=function(){return this.throwIfDisposed(),sb(this)};Q.prototype.addStrict=function(e){return this.throwIfDisposed(),OA(this,e)};Q.prototype.add=function(e){return this.throwIfDisposed(),be(this,e)};Q.prototype.all=function(e,t){return this.throwIfDisposed(),Pd(this,e,t)};Q.prototype.any=function(e,t){return this.throwIfDisposed(),th(this,e,t)};Q.prototype.argMax=function(e){return this.throwIfDisposed(),nh(this,e)};Q.prototype.argMin=function(e){return this.throwIfDisposed(),rb(this,e)};Q.prototype.asScalar=function(){return this.throwIfDisposed(),k(this.size===1,()=>"The array must have only 1 element."),K(this,[])};Q.prototype.asType=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.as2D=function(e,t){return this.throwIfDisposed(),K(this,[e,t])};Q.prototype.as3D=function(e,t,n){return this.throwIfDisposed(),K(this,[e,t,n])};Q.prototype.as4D=function(e,t,n,s){return 
this.throwIfDisposed(),K(this,[e,t,n,s])};Q.prototype.as5D=function(e,t,n,s,i){return this.throwIfDisposed(),K(this,[e,t,n,s,i])};Q.prototype.asin=function(){return this.throwIfDisposed(),ob(this)};Q.prototype.asinh=function(){return this.throwIfDisposed(),ab(this)};Q.prototype.atan=function(){return this.throwIfDisposed(),cb(this)};Q.prototype.atan2=function(e){return this.throwIfDisposed(),lb(this,e)};Q.prototype.atanh=function(){return this.throwIfDisposed(),hb(this)};Q.prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),oh(this,e,t,n,s)};Q.prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),ah(this,e,t)};Q.prototype.batchNorm=function(e,t,n,s,i){return this.throwIfDisposed(),Ro(this,e,t,n,s,i)};Q.prototype.broadcastTo=function(e){return this.throwIfDisposed(),ch(this,e)};Q.prototype.cast=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.ceil=function(){return this.throwIfDisposed(),fb(this)};Q.prototype.clipByValue=function(e,t){return this.throwIfDisposed(),jn(this,e,t)};Q.prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof Q&&(e=[e]),Mt([this,...e],t)};Q.prototype.conv1d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Hd(this,e,t,n,s,i,o)};Q.prototype.conv2dTranspose=function(e,t,n,s,i){return this.throwIfDisposed(),Yd(this,e,t,n,s,i)};Q.prototype.conv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),er(this,e,t,n,s,i,o)};Q.prototype.cos=function(){return this.throwIfDisposed(),lh(this)};Q.prototype.cosh=function(){return this.throwIfDisposed(),qd(this)};Q.prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),jd(this,e,t,n)};Q.prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),bb(this,e,t)};Q.prototype.depthwiseConv2D=function(e,t,n,s,i,o){return sn("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),Oo(this,e,t,n,s,i,o)};Q.prototype.depthwiseConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Oo(this,e,t,n,s,i,o)};Q.prototype.dilation2d=function(e,t,n,s,i){return this.throwIfDisposed(),wb(this,e,t,n,s,i)};Q.prototype.divNoNan=function(e){return this.throwIfDisposed(),Lb(this,e)};Q.prototype.divStrict=function(e){return this.throwIfDisposed(),EA(this,e)};Q.prototype.div=function(e){return this.throwIfDisposed(),_e(this,e)};Q.prototype.dot=function(e){return this.throwIfDisposed(),hA(this,e)};Q.prototype.elu=function(){return this.throwIfDisposed(),Do(this)};Q.prototype.equalStrict=function(e){return this.throwIfDisposed(),TA(this,e)};Q.prototype.equal=function(e){return this.throwIfDisposed(),ni(this,e)};Q.prototype.erf=function(){return this.throwIfDisposed(),Sb(this)};Q.prototype.exp=function(){return this.throwIfDisposed(),xs(this)};Q.prototype.expandDims=function(e){return this.throwIfDisposed(),Kn(this,e)};Q.prototype.expm1=function(){return this.throwIfDisposed(),Ib(this)};Q.prototype.fft=function(){return this.throwIfDisposed(),wh(this)};Q.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.floor=function(){return this.throwIfDisposed(),Pa(this)};Q.prototype.floorDiv=function(e){return this.throwIfDisposed(),Md(this,e)};Q.prototype.gather=function(e,t){return this.throwIfDisposed(),za(this,e,t)};Q.prototype.greaterEqualStrict=function(e){return this.throwIfDisposed(),AA(this,e)};Q.prototype.greaterEqual=function(e){return this.throwIfDisposed(),tr(this,e)};Q.prototype.greaterStrict=function(e){return this.throwIfDisposed(),vA(this,e)};Q.prototype.greater=function(e){return 
this.throwIfDisposed(),Ts(this,e)};Q.prototype.ifft=function(){return this.throwIfDisposed(),ja(this)};Q.prototype.irfft=function(){return this.throwIfDisposed(),hp(this)};Q.prototype.isFinite=function(){return this.throwIfDisposed(),dA(this)};Q.prototype.isInf=function(){return this.throwIfDisposed(),pA(this)};Q.prototype.isNaN=function(){return this.throwIfDisposed(),mA(this)};Q.prototype.leakyRelu=function(e){return this.throwIfDisposed(),Xd(this,e)};Q.prototype.lessEqualStrict=function(e){return this.throwIfDisposed(),NA(this,e)};Q.prototype.lessEqual=function(e){return this.throwIfDisposed(),Mr(this,e)};Q.prototype.lessStrict=function(e){return this.throwIfDisposed(),CA(this,e)};Q.prototype.less=function(e){return this.throwIfDisposed(),dh(this,e)};Q.prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),Tb(this,e,t,n,s)};Q.prototype.logSigmoid=function(){return this.throwIfDisposed(),gA(this)};Q.prototype.logSoftmax=function(e){return this.throwIfDisposed(),Qd(this,e)};Q.prototype.logSumExp=function(e,t){return this.throwIfDisposed(),vb(this,e,t)};Q.prototype.log=function(){return this.throwIfDisposed(),is(this)};Q.prototype.log1p=function(){return this.throwIfDisposed(),Jd(this)};Q.prototype.logicalAnd=function(e){return this.throwIfDisposed(),Bs(this,e)};Q.prototype.logicalNot=function(){return this.throwIfDisposed(),ph(this)};Q.prototype.logicalOr=function(e){return this.throwIfDisposed(),ep(this,e)};Q.prototype.logicalXor=function(e){return this.throwIfDisposed(),yA(this,e)};Q.prototype.matMul=function(e,t,n){return this.throwIfDisposed(),at(this,e,t,n)};Q.prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),mh(this,e,t,n,s)};Q.prototype.max=function(e,t){return this.throwIfDisposed(),Xn(this,e,t)};Q.prototype.maximumStrict=function(e){return this.throwIfDisposed(),DA(this,e)};Q.prototype.maximum=function(e){return this.throwIfDisposed(),Us(this,e)};Q.prototype.mean=function(e,t){return this.throwIfDisposed(),zt(this,e,t)};Q.prototype.min=function(e,t){return this.throwIfDisposed(),Ha(this,e,t)};Q.prototype.minimumStrict=function(e){return this.throwIfDisposed(),kA(this,e)};Q.prototype.minimum=function(e){return this.throwIfDisposed(),ko(this,e)};Q.prototype.modStrict=function(e){return this.throwIfDisposed(),FA(this,e)};Q.prototype.mod=function(e){return this.throwIfDisposed(),tp(this,e)};Q.prototype.mulStrict=function(e){return this.throwIfDisposed(),_A(this,e)};Q.prototype.mul=function(e){return this.throwIfDisposed(),X(this,e)};Q.prototype.neg=function(){return this.throwIfDisposed(),Pt(this)};Q.prototype.norm=function(e,t,n){return this.throwIfDisposed(),pp(this,e,t,n)};Q.prototype.notEqualStrict=function(e){return this.throwIfDisposed(),RA(this,e)};Q.prototype.notEqual=function(e){return this.throwIfDisposed(),Pr(this,e)};Q.prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),vo(this,e,t,n)};Q.prototype.onesLike=function(){return this.throwIfDisposed(),Dn(this)};Q.prototype.pad=function(e,t){return this.throwIfDisposed(),ki(this,e,t)};Q.prototype.pool=function(e,t,n,s,i){return this.throwIfDisposed(),LA(this,e,t,n,s,i)};Q.prototype.powStrict=function(e){return this.throwIfDisposed(),WA(this,e)};Q.prototype.pow=function(e){return this.throwIfDisposed(),ii(this,e)};Q.prototype.prelu=function(e){return this.throwIfDisposed(),gh(this,e)};Q.prototype.prod=function(e,t){return this.throwIfDisposed(),sp(this,e,t)};Q.prototype.reciprocal=function(){return this.throwIfDisposed(),Eb(this)};Q.prototype.relu=function(){return 
this.throwIfDisposed(),Fi(this)};Q.prototype.relu6=function(){return this.throwIfDisposed(),Db(this)};Q.prototype.reshapeAs=function(e){return this.throwIfDisposed(),K(this,e.shape)};Q.prototype.reshape=function(e){return this.throwIfDisposed(),K(this,e)};Q.prototype.resizeBilinear=function(e,t){return this.throwIfDisposed(),XA(this,e,t)};Q.prototype.resizeNearestNeighbor=function(e,t){return this.throwIfDisposed(),JA(this,e,t)};Q.prototype.reverse=function(e){return this.throwIfDisposed(),As(this,e)};Q.prototype.rfft=function(){return this.throwIfDisposed(),Lh(this)};Q.prototype.round=function(){return this.throwIfDisposed(),kb(this)};Q.prototype.rsqrt=function(){return this.throwIfDisposed(),ip(this)};Q.prototype.selu=function(){return this.throwIfDisposed(),rp(this)};Q.prototype.separableConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Fb(this,e,t,n,s,i,o)};Q.prototype.sigmoid=function(){return this.throwIfDisposed(),Ei(this)};Q.prototype.sign=function(){return this.throwIfDisposed(),_b(this)};Q.prototype.sin=function(){return this.throwIfDisposed(),op(this)};Q.prototype.sinh=function(){return this.throwIfDisposed(),ap(this)};Q.prototype.slice=function(e,t){return this.throwIfDisposed(),st(this,e,t)};Q.prototype.softmax=function(e){return this.throwIfDisposed(),Uo(this,e)};Q.prototype.softplus=function(){return this.throwIfDisposed(),Va(this)};Q.prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),fh(this,e,t)};Q.prototype.split=function(e,t){return this.throwIfDisposed(),os(this,e,t)};Q.prototype.sqrt=function(){return this.throwIfDisposed(),Sn(this)};Q.prototype.square=function(){return this.throwIfDisposed(),Lt(this)};Q.prototype.squaredDifference=function(e){return this.throwIfDisposed(),Sh(this,e)};Q.prototype.squaredDifferenceStrict=function(e){return this.throwIfDisposed(),$A(this,e)};Q.prototype.squeeze=function(e){return this.throwIfDisposed(),zr(this,e)};Q.prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof Q?[this,e]:[this,...e];return as(n,t)};Q.prototype.step=function(e){return this.throwIfDisposed(),Ka(this,e)};Q.prototype.stridedSlice=function(e,t,n,s,i,o,a,c){return this.throwIfDisposed(),$b(this,e,t,n,s,i,o,a,c)};Q.prototype.subStrict=function(e){return this.throwIfDisposed(),UA(this,e)};Q.prototype.sub=function(e){return this.throwIfDisposed(),Ce(this,e)};Q.prototype.sum=function(e,t){return this.throwIfDisposed(),Ue(this,e,t)};Q.prototype.tan=function(){return this.throwIfDisposed(),Ub(this)};Q.prototype.tanh=function(){return this.throwIfDisposed(),Ma(this)};Q.prototype.tile=function(e){return this.throwIfDisposed(),Br(this,e)};Q.prototype.toBool=function(){return this.throwIfDisposed(),ve(this,"bool")};Q.prototype.toFloat=function(){return this.throwIfDisposed(),ve(this,"float32")};Q.prototype.toInt=function(){return this.throwIfDisposed(),ve(this,"int32")};Q.prototype.topk=function(e,t){return this.throwIfDisposed(),Bb(this,e,t)};Q.prototype.transpose=function(e){return this.throwIfDisposed(),Pe(this,e)};Q.prototype.unique=function(e){return this.throwIfDisposed(),up(this,e)};Q.prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Mb(this,e,t)};Q.prototype.unstack=function(e){return this.throwIfDisposed(),_i(this,e)};Q.prototype.where=function(e,t){return this.throwIfDisposed(),$n(e,this,t)};Q.prototype.zerosLike=function(){return this.throwIfDisposed(),et(this)};let Ap;function cn(){return Ap==null&&(Ap=QT().epsilon()),Ap}function $Q(e){Ap=e}function ri(){return"channelsLast"}class rr extends 
Error{constructor(e){super(e);Object.setPrototypeOf(this,rr.prototype)}}class oi extends Error{constructor(e){super(e);Object.setPrototypeOf(this,oi.prototype)}}class j extends Error{constructor(e){super(e);Object.setPrototypeOf(this,j.prototype)}}class Ge extends Error{constructor(e){super(e);Object.setPrototypeOf(this,Ge.prototype)}}class fv extends Error{constructor(e){super(e);Object.setPrototypeOf(this,fv.prototype)}}class uz extends Error{constructor(e){super(e);Object.setPrototypeOf(this,uz.prototype)}}function Po(e,t){if(Array.isArray(e)){let n=[];for(let s=0;sn.toUpperCase())}let Ms={};function cw(e){if(e==null)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function lw(e){if(e==null||typeof e!="object")return;if(Array.isArray(e))e.forEach(t=>lw(t));else{const t=Object.keys(e);for(const n of t){const s=e[n];s!=null&&typeof s=="object"&&(!Array.isArray(s)&&s.type==="ndarray"&&typeof s.value=="number"?e[n]=s.value:lw(s))}}}function Dh(e,t={},n={},s="object",i=!1){if(typeof e=="string"){const o=e;let a;if(o in n)a=n[o];else if(o in Ms)a=Ms[o];else if(a=t[o],a==null)throw new j(`Unknown ${s}: ${e}. This may be due to one of the following reasons: 1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. 2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return a}else{const o=e;if(o.className==null||o.config==null)throw new j(`${s}: Improper config format: ${JSON.stringify(o)}. 'className' and 'config' must set.`);const a=o.className;let c,u;if(a in n?[c,u]=n[a]:a in Ms?[c,u]=Ms.className:a in t&&([c,u]=t[a]),c==null)throw new j(`Unknown ${s}: ${a}. This may be due to one of the following reasons: 1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. -2. 
The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(u!=null){const p={};for(const w of Object.keys(Ms))p[w]=Ms[w];for(const w of Object.keys(n))p[w]=n[w];const m=o.config;m.customObjects=p;const y=Object.assign({},Ms);for(const w of Object.keys(n))Ms[w]=n[w];lw(o.config);const b=u(c,o.config,n,i);return Ms=Object.assign({},y),b}else{const p=Object.assign({},Ms);for(const y of Object.keys(n))Ms[y]=n[y];const m=new c(o.config);return Ms=Object.assign({},p),m}}}function dz(e,t){return et?1:0}function vp(e,t){return-1*dz(e,t)}function BQ(e){switch(e){case"float32":return"float32";default:throw new j(`Invalid dtype: ${e}`)}}function MQ(e,t){if(e==null||t==null)return e===t;if(e.length!==t.length)return!1;for(let n=0;n=0),vs(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every(i=>typeof i===t)}function mn(e,t){Array.isArray(e)?(k(e.length>0,()=>`${t} is unexpectedly an empty array.`),e.forEach((n,s)=>mn(n,`element ${s+1} of ${t}`))):k(Number.isInteger(e)&&e>0,()=>`Expected ${t} to be a positive integer, but got ${yv(e)}.`)}function yv(e){return e===null?"null":Array.isArray(e)?"["+e.map(t=>yv(t)).join(",")+"]":typeof e=="string"?`"${e}"`:`${e}`}function mz(e,t){let n=qn(),s;const i=(...o)=>{const a=qn();return a-n0,"arrayOfValues is empty");for(const t of e)vs(Array.isArray(t),"one of the values is not an array"),vs(t.length>0,"one of the values is empty");return e.reduce((t,n)=>t.length===0?n.map(s=>[s]):n.map(s=>t.map(i=>[...i,s])).reduce((s,i)=>s.concat(i),[]),[])}function uw(e,t){return ee(()=>Sn(Ue(X(e,e),t,!0)))}class kh extends Ao{getConfig(){return{}}}class dw extends kh{constructor(e){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=uw(e,this.axis),n=jn(t,0,this.maxValue);return X(e,_e(n,be(an(),t)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}dw.className="MaxNorm",ge(dw);class pw extends kh{constructor(e){super();this.defaultAxis=0,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>_e(e,be(an(),uw(e,this.axis))))}getConfig(){return{axis:this.axis}}}pw.className="UnitNorm",ge(pw);class mw extends kh{apply(e){return Fi(e)}}mw.className="NonNeg",ge(mw);class fw extends kh{constructor(e){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=e.minValue!=null?e.minValue:this.defaultMinValue,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.rate=e.rate!=null?e.rate:this.defaultRate,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=uw(e,this.axis),n=be(X(this.rate,jn(t,this.minValue,this.maxValue)),X(1-this.rate,t));return X(e,_e(n,be(an(),t)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}fw.className="MinMaxNorm",ge(fw);const wv={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function cn(e){return cw(e)}function Lv(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"constraint")}function ln(e){if(e==null)return null;if(typeof e=="string"){const t=e in wv?wv[e]:e,n={className:t,config:{}};return Lv(n)}else return e instanceof kh?e:Lv(e)}function fz(e){return new dw(e)}function gz(e){return new pw(e)}function yz(){return new mw}function bz(e){return new fw(e)}var wz=Object.freeze({__proto__:null,maxNorm:fz,unitNorm:gz,nonNeg:yz,minMaxNorm:bz});const 
Lz=["channelsFirst","channelsLast"],Sz=["valid","same","causal"],Iz=["max","avg"],xz=["sum","mul","concat","ave"],zQ=["temporal"];const Qa=new Map;function Gt(e){Za(Lz,"DataFormat",e)}function Ns(e){Za(Sz,"PaddingMode",e)}function Sv(e){Za(Iz,"PoolMode",e)}const Fh=[],Iv="/";function Po(e,t){Fh.push(e);try{const n=t();return Fh.pop(),n}catch(n){throw Fh.pop(),n}}function Tz(){return Fh.length===0?"":Fh.join(Iv)+Iv}function xv(e){if(!Av(e))throw new Error("Not a valid tensor name: '"+e+"'");return Tz()+e}function Tv(e){if(!Av(e))throw new Error("Not a valid tensor name: '"+e+"'");Qa.has(e)||Qa.set(e,0);const t=Qa.get(e);if(Qa.set(e,Qa.get(e)+1),t>0){const n=`${e}_${t}`;return Qa.set(n,1),n}else return e}const Az=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function Av(e){return!!e.match(Az)}function vz(e){return e===parseInt(e.toString(),10)}function Yr(e,t,n){t==null&&(t=0),n==null&&(n=e.length);let s=1;for(let i=t;ii-o),n=Math.floor((t.length-1)/2),s=Math.ceil((t.length-1)/2);return n===s?t[n]:(t[n]+t[s])/2}function ai(e,t){if(t0?t.reduce((n,s)=>n*s):1}function _h(e,t){return e.asType(t)}function Wh(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function Rz(e,t){return ee(()=>{if(e.shape.length!==2)throw new j(`repeat() expects a rank-2 tensor, but received a rank-${e.shape.length} tensor.`);const n=Wh(e,1);return bw(n,[1,t,1])})}function Oz(e){const t=[Yr(e.shape)];return e.reshape(t)}function Ez(e){if(e.rank<=1)throw new j(`batchFlatten requires a minimum rank of 2. Got rank: ${e.rank}.`);const t=[e.shape[0],Yr(e.shape,1)];return e.reshape(t)}function zo(e,t,n){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:return Wb(e,[t,0],[n,e.shape[1]]);case 3:return lp(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return bh(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return st(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return st(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new j(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}})}function gw(e,t,n){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:return Wb(e,[0,t],[e.shape[0],n]);case 3:return lp(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return bh(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Cp(e,t,n,s){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:switch(s){case 1:return zo(e,t,n);case 2:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return zo(e,t,n);case 2:return lp(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return zo(e,t,n);case 2:return bh(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return bh(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function yw(e,t=-1){let n;return t<0&&(n=e[0].rank,n!==0?t=n:t=0),t===e[0].rank&&(t=-1),Mt(e,t)}function Nv(e,t){switch(e.rank){case 1:return rA([e,t]);case 2:return oA([e,t],0);case 3:return aA([e,t],0);case 4:return cA([e,t],0);default:throw new j(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function 
bw(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new j(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return Br(e,t)}function Rp(e,t=0,n=1,s,i){return Ob(e,t,n,s,i)}function Wi(e,t,n,s){if(e.rank<2||t.rank<2)throw new Ge(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3){const i=e.shape.slice(-1)[0],o=t.shape.slice(-2)[0];if(i!==o)throw new Ge(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${t.shape}`)}if(e.rank===2&&t.rank===2){const i=!1,o=!1;return bp({a:e,b:t,transposeA:i,transposeB:o,bias:s?ww(e.rank,s,ri()):null,activation:n})}else{const i=e.shape.slice(),o=i.pop();e=e.reshape([-1,o]);const a=t.shape.slice(),c=a.pop(),u=a.pop(),p=[...a,c],m=Array.from({length:t.rank},(I,T)=>T===0?t.rank-2:T<=t.rank-2?T-1:T);t=t.transpose(m).reshape([u,-1]);const y=[...i,...p],b=!1,w=!1;return bp({a:e,b:t,transposeA:b,transposeB:w,bias:s?ww(e.rank,s,ri()):null,activation:n}).reshape(y)}}function KQ(e){return ee(()=>{const t=et(e),n=Dn(e);return $n(ni(e,t),t,$n(Ts(e,et(e)),n,X(-1,n)))})}function XQ(e,t){return ee(()=>{if(e.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return e=e.toInt(),To(e,t).toFloat()})}function Cv(e,t,n){return ee(()=>(Array.isArray(t)?t=rs(t,"int32"):t=t.toInt(),Ma(e,t,n)))}function $h(e){return X(e,e)}function JQ(e,t){return ee(()=>{if(typeof t=="number"&&(t=Ne(Math.round(t),"int32")),t.dtype!=="int32")throw new Ge(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return ii(e,t)})}function ww(e,t,n){const s=t.shape;if(t.rank!==1&&t.rank!==e)throw new j(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(e===5){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===4){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===3){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new j(`Unsupported input rank by biasAdd: ${t.rank}`)}function $i(e,t,n){return ee(()=>(n==null&&(n=ri()),Gt(n),e.add(ww(e.rank,t,n))))}function Dz(e,t=1){if(t!==1)throw new Ge(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return Oo(e)}function kz(e){return ee(()=>_e(e,sn(e).add(1)))}function Rv(e,t,n,s){return ee(()=>zA(e,t,n,s))}function Fz(e){return ee(()=>{const t=be(.5,X(.2,e));return jn(t,0,1)})}function Uh(e,t,n=!1){return n?e():t()}const _z=["fanIn","fanOut","fanAvg"],Wz=["normal","uniform","truncatedNormal"],ZQ=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function $z(e){Za(_z,"FanMode",e)}function Uz(e){Za(Wz,"Distribution",e)}class Ps extends Ao{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class Lw extends Ps{apply(e,t){return ct(e,t)}}Lw.className="Zeros",ge(Lw);class Op extends Ps{apply(e,t){return si(e,t)}}Op.className="Ones",ge(Op);class Sw extends Ps{constructor(e){super();if(typeof e!="object")throw new j(`Expected argument of type ConstantConfig but 
got ${e}`);if(e.value===void 0)throw new j(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return ee(()=>X(Ne(this.value),si(e,t)))}getConfig(){return{value:this.value}}}Sw.className="Constant",ge(Sw);class Iw extends Ps{constructor(e){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return _o(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}Iw.className="RandomUniform",ge(Iw);class xw extends Ps{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`randomNormal does not support dType ${t}.`);return Rp(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}xw.className="RandomNormal",ge(xw);class Tw extends Ps{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`truncatedNormal does not support dType ${t}.`);return Ih(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Tw.className="TruncatedNormal",ge(Tw);class Aw extends Ps{constructor(e){super();this.gain=e.gain!=null?e.gain:1}apply(e,t){return ee(()=>{if(e.length!==2||e[0]!==e[1])throw new j("Identity matrix initializer can only be used for 2D square matrices.");return X(this.gain,Kd(e[0]))})}getConfig(){return{gain:this.gain}}}Aw.className="Identity",ge(Aw);function Bz(e,t="channelsLast"){let n,s;if(Gt(t),e.length===2)n=e[0],s=e[1];else if([3,4,5].indexOf(e.length)!==-1){if(t==="channelsFirst"){const i=Yr(e,2);n=e[1]*i,s=e[0]*i}else if(t==="channelsLast"){const i=Yr(e,0,e.length-2);n=e[e.length-2]*i,s=e[e.length-1]*i}}else{const i=Yr(e);n=Math.sqrt(i),s=Math.sqrt(i)}return[n,s]}class Zn extends Ps{constructor(e){super();if(e.scale<0)throw new j(`scale must be a positive float. 
Got: ${e.scale}`);this.scale=e.scale==null?1:e.scale,this.mode=e.mode==null?"fanIn":e.mode,$z(this.mode),this.distribution=e.distribution==null?"normal":e.distribution,Uz(this.distribution),this.seed=e.seed}apply(e,t){const n=Bz(e),s=n[0],i=n[1];let o=this.scale;if(this.mode==="fanIn"?o/=Math.max(1,s):this.mode==="fanOut"?o/=Math.max(1,i):o/=Math.max(1,(s+i)/2),this.distribution==="normal"){const a=Math.sqrt(o);if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`${this.getClassName()} does not support dType ${t}.`);return Ih(e,0,a,t,this.seed)}else{const a=Math.sqrt(3*o);return _o(e,-a,a,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}Zn.className="VarianceScaling",ge(Zn);class Ep extends Zn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Ep.className="GlorotUniform",ge(Ep);class Dp extends Zn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Dp.className="GlorotNormal",ge(Dp);class kp extends Zn{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}kp.className="HeNormal",ge(kp);class Fp extends Zn{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Fp.className="HeUniform",ge(Fp);class _p extends Zn{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}_p.className="LeCunNormal",ge(_p);class Wp extends Zn{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Wp.className="LeCunNormal",ge(Wp);class vw extends Ps{constructor(e){super();if(this.DEFAULT_GAIN=1,this.gain=e.gain==null?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,this.seed!=null)throw new Ge("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return ee(()=>{if(e.length<2)throw new Ge("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const n=e[0]>e[1]?[e[1],e[0]]:e,s=Rp(n,0,1,"float32");let i=QA.gramSchmidt(s);return e[0]>e[1]&&(i=i.transpose()),X(this.gain,i)})}getConfig(){return{gain:this.gain,seed:this.seed}}}vw.className="Orthogonal",ge(vw);const Ov={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function Ev(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"initializer")}function Vt(e){return cw(e)}function Ft(e){if(typeof e=="string"){const t=e in Ov?Ov[e]:e;if(t==="GlorotNormal")return new Dp;if(t==="GlorotUniform")return new Ep;if(t==="HeNormal")return new kp;if(t==="HeUniform")return new Fp;if(t==="LeCunNormal")return new _p;if(t==="LeCunUniform")return new Wp;{const n={};return n.className=t,n.config={},Ev(n)}}else return e instanceof Ps?e:Ev(e)}function Mz(){return new Lw}function Pz(){return new Op}function zz(e){return new Sw(e)}function Gz(e){return new Iw(e)}function Vz(e){return new xw(e)}function Hz(e){return new Tw(e)}function 
Yz(e){return new Aw(e)}function qz(e){return new Zn(e)}function jz(e){return new Ep(e)}function Kz(e){return new Dp(e)}function Xz(e){return new kp(e)}function Jz(e){return new Fp(e)}function Zz(e){return new _p(e)}function Qz(e){return new Wp(e)}function e3(e){return new vw(e)}var t3=Object.freeze({__proto__:null,zeros:Mz,ones:Pz,constant:zz,randomUniform:Gz,randomNormal:Vz,truncatedNormal:Hz,identity:Yz,varianceScaling:qz,glorotUniform:jz,glorotNormal:Kz,heNormal:Xz,heUniform:Jz,leCunNormal:Zz,leCunUniform:Qz,orthogonal:e3});let n3=0;function Dv(){return n3++}const $p={};function Up(e=""){return e in $p||($p[e]=0),$p[e]+=1,e+$p[e].toString()}function Nw(e){return Array.isArray(e)&&Array.isArray(e[0])}function Bp(e){return e.length===0?[]:Array.isArray(e[0])?e:[e]}function Xe(e){let t;if(Array.isArray(e)){if(e.length!==1)throw new j(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function It(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(e.length===1)return e=e,e[0];throw new j(`Expected exactly 1 Shape; got ${e.length}`)}else return e}function Mp(e){let t=0;for(const n of e)n.shape.length===0?t+=1:t+=n.shape.reduce((s,i)=>s*i);return t}const kv="Variable";class ci{constructor(e,t="float32",n=kv,s=!0,i=null){this.dtype=t==null?"float32":t,this.shape=e.shape,this.id=Dv(),n=n==null?kv:n,this.originalName=xv(n),this.name=Tv(this.originalName),this.trainable_=s,this.constraint=i,this.val=xA(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),s3(this.val,e),this.val.id!==e.id&&(this.val.assign(e),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function s3(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}function QQ(e,t,n,s){return new ci(e,t,n,!0,s)}function eee(e,t,n){return new ci(ct(e),t,n)}function tee(e,t,n){return new ci(et(e),t,n)}function nee(e,t,n){const s=si(e);return new ci(s,t,n)}function see(e,t,n){const s=Dn(e);return new ci(s,t,n)}function iee(e,t,n){return new ci(Kd(e),t,n)}function ree(e,t,n,s,i,o="randomUniform"){return new ci(_o(e,t,n,s),s,o)}function oee(e,t=0,n=1,s,i,o="truncatedNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Ge(`randomNormal does not support dType ${s}.`);return new ci(Ih(e,t,n,s,i),s,o)}function aee(e,t=0,n=1,s,i,o="randomNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Ge(`randomNormalVariable does not support dType ${s}.`);return new ci(Ob(e,t,n,s,i),s,o)}function cee(e,t){return e.write(t)}function lee(e,t){return e.write(be(e.read(),t))}function hee(e,t){return e.write(Ce(e.read(),t))}function Cw(e){return e.map(t=>t.read())}function Rw(e){e.forEach(t=>{const n=t[0];n.write(t[1])})}function uee(e,t){const n=t.map(i=>i.read()),s=Ab(e,n);return t.map(i=>s.grads[i.name])}class fn{constructor(e){this.dtype=e.dtype,this.shape=e.shape,e.shape!=null?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class li{constructor(e,t,n,s,i,o,a){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=i,this.outputTensorIndex=a,this.id=Dv(),o!=null&&(this.originalName=xv(o),this.name=Tv(this.originalName)),this.rank=t.length}}let i3=0;class Pp{constructor(e,t){this.callArgs=t,this.id=i3++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const n of e.inboundLayers)n!=null&&n.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)t!=null?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let r3=0;class lt extends Ao{constructor(e={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=r3++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const n=this.getClassName();t=or(n)+"_"+Up(n)}if(this.name=t,this.trainable_=e.trainable==null?!0:e.trainable,e.inputShape!=null||e.batchInputShape!=null){let n;if(e.batchInputShape!=null)n=e.batchInputShape;else if(e.inputShape!=null){let i=null;e.batchSize!=null&&(i=e.batchSize),n=[i].concat(e.inputShape)}this.batchInputShape=n;let s=e.dtype;s==null&&(s=e.inputDType),s==null&&(s="float32"),this.dtype=s}e.weights!=null?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(this.inboundNodes.length===0)throw new oi(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new j(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return 
Jn(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return Jn(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new rr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new rr(`Layer ${this.name} is not connected, no input to return.`);return Jn(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new rr(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new rr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return Jn(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(e=>e())}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach(t=>t.trainable=e),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(e=>e.trainable):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(e=>!e.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Nt(e),this.inputSpec==null||this.inputSpec.length===0)return;const t=Nt(this.inputSpec);if(e.length!==t.length)throw new j(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. 
Input received: ${e}`);for(let n=0;ni.maxNDim)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${i.maxNDim}, found ndim=${o}`);if(i.minNDim!=null&&o=0?a[u]:a[a.length+u];if(p!=null&&[p,null].indexOf(m)===-1)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected axis ${u} of input shape to have value ${p} but got shape ${a}.`)}}if(i.shape!=null)for(let a=0;a{if(!this.built){this.assertInputCompatibility(e);const o=[];for(const a of Nt(e))o.push(a.shape);this.build(Jn(o)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&i&&(this._refCount=1)}if(this.assertInputCompatibility(e),i){let o=this.call(e,t);const a=Nt(o),c=[];for(let u of a)n.indexOf(u)!==-1&&(u=u.clone()),c.push(u);if(o=Jn(c),this.activityRegularizer!=null)throw new Ge("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return o}else{const o=o3(e),a=this.computeOutputShape(o);let c;const u=a3(e);if(this.warnOnIncompatibleInputShape(Array.isArray(e)?o[0]:o),a!=null&&a.length>0&&Array.isArray(a[0])?c=a.map((p,m)=>new li(u,p,this,Nt(e),t,this.name,m)):c=new li(u,a,this,Nt(e),t,this.name),this.addInboundNode(e,c,null,null,o,a,t),this._refCount++,this.activityRegularizer!=null)throw new Ge("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}})}warnOnIncompatibleInputShape(e){if(this.batchInputShape==null)return;if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach((n,s)=>{n!=null&&e[s]!=null&&e[s]!==n&&(t=!0)}),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new rr(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);e.indexOf(n)===-1&&e.push(n)}if(e.length===1){const t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&t.length===1?t[0]:t}else throw new rr(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new oi(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Mp(this.weights)}build(e){this.built=!0}getWeights(e=!1){return Cw(e?this.trainableWeights:this.weights)}setWeights(e){ee(()=>{const t=this.weights;if(t.length!==e.length)throw new j(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(t.length===0)return;const n=[],s=Cw(t);for(let i=0;ii.apply(u.read())),o==null&&(o=!0),o?this._trainableWeights.push(u):this._nonTrainableWeights.push(u),u}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){if(e==null||Array.isArray(e)&&e.length===0)return;e=Nt(e),this._losses!==void 0&&this._losses!==null&&this.losses.push(...e)}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(t!=null)if(Array.isArray(t))t.forEach(n=>{if(n!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return t}addInboundNode(e,t,n,s,i,o,a=null){const c=Nt(e);t=Nt(t),n=Nt(n),s=Nt(s),i=Bp(i),o=Bp(o);const u=[],p=[],m=[];for(const y of c)u.push(y.sourceLayer),p.push(y.nodeIndex),m.push(y.tensorIndex);new Pp({outboundLayer:this,inboundLayers:u,nodeIndices:p,tensorIndices:m,inputTensors:c,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:i,outputShapes:o},a);for(let y=0;ye.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return--this._refCount===0&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function o3(e){e=Nt(e);const t=[];for(const n of e)t.push(n.shape);return Jn(t)}function a3(e){return"float32"}function Fv(e,t,n){if((t==null||n!=null&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),t.inboundNodes.length===0)return[e];{const s=t.inboundNodes[n];if(s.inboundLayers.length===0)return s.inputTensors;{const i=[];for(let o=0;o0){const i=await Promise.all(t);for(let o=0;obe(this.totals[s],X(i,n)));this.totals[s]=a,o!=null&&o.dispose()}}}async onEpochEnd(e,t){if(t!=null)for(const n of this.params.metrics){if(this.totals[n]==null)continue;typeof this.totals[n]=="number"?t[n]=this.totals[n]/this.seen:ee(()=>{const s=X(_e(1,this.seen),this.totals[n]);t[n]=s,this.totals[n].dispose(),Rn(t[n])})}}}class Bv extends nc{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){t==null&&(t={}),this.epoch.push(e);for(const n in t)this.history[n]==null&&(this.history[n]=[]),this.history[n].push(t[n])}async syncData(){const e=[],t=[],n=[];for(const i in this.history){const o=this.history[i];for(let a=0;anew Mv(s,t))}class zs{constructor(){}static registerCallbackConstructor(e,t){k(e>=0&&Number.isInteger(e),()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`),zs.checkForDuplicate(t),zs.constructors[e]==null&&(zs.constructors[e]=[]),zs.constructors[e].push(t)}static checkForDuplicate(e){for(const t in zs.constructors){const n=zs.constructors[+t];n.forEach(s=>{if(s===e)throw new j("Duplicate callback constructor.")})}}static clear(){zs.constructors={}}static createCallbacks(e){const t=[];for(const n in zs.constructors){const s=+n;e>=s&&t.push(...zs.constructors[s])}return t.map(n=>new n)}}zs.constructors={};function zv(e,t,n,s,i,o,a,c,u){const p=new Bv,m=[new l3,...zs.createCallbacks(t)];e!=null&&m.push(...e),m.push(p);const y=new Uv(m);return y.setParams({epochs:n,initialEpoch:s,samples:i,steps:o,batchSize:a,verbose:t,doValidation:c,metrics:u}),{callbackList:y,history:p}}function 
hi(e,t={},n=!1){return Dh(e,Ws.getMap().classNameMap,t,"layer",n)}function zp(e,t){return ee(()=>{e.dtype!=="float32"&&(e=e.asType("float32"));const n=Ue($h(e),t,!0),s=hh(n.shape,an()),i=Sn(Us(n,s));return _e(e,i)})}function ar(e,t){return ee(()=>zt($h(Ce(t,e)),-1))}function sc(e,t){return ee(()=>zt(sn(Ce(t,e)),-1))}function Kr(e,t){return ee(()=>{const n=Ce(e,t),s=jn(sn(e),an(),Number.MAX_VALUE),i=sn(_e(n,s));return X(100,zt(i,-1))})}function Ow(e,t){return ee(()=>{const n=jn(t,an(),Number.MAX_VALUE),s=is(be(1,n)),i=jn(e,an(),Number.MAX_VALUE),o=is(be(1,i));return zt($h(Ce(s,o)),-1)})}function h3(e,t){return ee(()=>{const n=Us(0,Ce(1,X(e,t)));return zt($h(n),-1)})}function u3(e,t){return ee(()=>{const n=Us(0,Ce(1,X(e,t)));return zt(n,-1)})}function d3(e,t){return ee(()=>{const n=Ue(X(e,t),-1),s=Xn(X(Ce(1,e),t),-1);return Us(0,be(1,Ce(s,n)))})}function p3(e,t){return ee(()=>{const n=Math.log(2),s=Ce(t,e),i=Ce(be(s,za(X(-2,s))),n);return zt(i,-1)})}function Bh(e,t,n=!1){return ee(()=>{if(n)t=Wo(t);else{const s=Ue(t,t.shape.length-1,!0);t=_e(t,s)}return t=jn(t,an(),1-an()),Pt(Ue(X(e.toFloat(),is(t)),t.shape.length-1))})}function Gp(e,t,n=!1){return ee(()=>{const s=Ba(Oz(e)).toInt();t=jn(t,an(),1-an());const i=t.shape,o=To(s,i[i.length-1]).reshape(i);return Bh(o,t,n)})}function m3(e,t){if(!ot(e.shape,t.shape))throw new j(`logits and labels must have the same shape, but got shapes ${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return ee(()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())})}function Vp(e,t){return ee(()=>{let n;return n=jn(t,an(),1-an()),n=is(_e(n,Ce(1,n))),zt(m3(e,n),-1)})}function Ew(e,t){return ee(()=>{const n=jn(e,an(),1),s=jn(t,an(),1);return Ue(X(e,is(_e(n,s))),-1)})}function f3(e,t){return ee(()=>{const n=is(be(an(),t));return zt(Ce(t,X(e,n)),-1)})}function Hp(e,t){return ee(()=>{const n=zp(e,-1),s=zp(t,-1),i=X(n,s);return Pt(Ue(i,-1))})}const dee=ar,pee=ar,mee=sc,fee=sc,gee=Kr,yee=Kr,bee=Ow,wee=Ow,Lee=Ew,See=Ew,Iee=Hp,Yp={meanSquaredError:ar,meanAbsoluteError:sc,meanAbsolutePercentageError:Kr,meanSquaredLogarithmicError:Ow,squaredHinge:h3,hinge:u3,categoricalHinge:d3,logcosh:p3,categoricalCrossentropy:Bh,sparseCategoricalCrossentropy:Gp,binaryCrossentropy:Vp,kullbackLeiblerDivergence:Ew,poisson:f3,cosineProximity:Hp};function Dw(e){if(typeof e=="string"){if(e in Yp)return Yp[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new j(t)}else return e}function kw(e,t){return ee(()=>{const n=X(.5,Dn(t)),s=_h(Ts(t,n),e.dtype);return zt(ni(e,s),-1)})}function Fw(e,t){return ee(()=>_h(ni(nh(e,-1),nh(t,-1)),"float32"))}function Gv(e,t){return ee(()=>Bs(e.equal(1),t.equal(1)).sum().cast("float32"))}function g3(e,t){return ee(()=>Bs(e.equal(1),t.equal(0)).sum().cast("float32"))}function y3(e,t){return ee(()=>Bs(e.equal(0),t.equal(1)).sum().cast("float32"))}function Vv(e,t){return ee(()=>{const n=Gv(e,t),s=y3(e,t),i=n.add(s);return $n(Ts(i,0),n.div(i),0).cast("float32")})}function b3(e,t){return ee(()=>{const n=Gv(e,t),s=g3(e,t),i=n.add(s);return $n(Ts(i,0),n.div(i),0).cast("float32")})}function Hv(e,t){return Vp(e,t)}function Yv(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),t=t.argMax(-1),t.dtype!==e.dtype&&(t=t.asType(e.dtype)),ni(e,t).asType("float32")}function xee(e,t){throw new Ge}function Tee(e,t){throw new Ge}const w3=ar,L3=ar,S3=sc,I3=sc,x3=Kr,T3=Kr,_w=Bh,A3=Hp,qv=Gp,qp={binaryAccuracy:kw,categoricalAccuracy:Fw,precision:Vv,categoricalCrossentropy:_w,sparseCategoricalCrossentropy:qv,mse:w3,MSE:L3,mae:S3,MAE:I3,mape:x3,MAPE:T3,cosine:A3};function v3(e){if(typeof e=="string"&&e in qp)return qp[e];if(typeof e!="string"&&e!=null)return e;throw new j(`Unknown metric ${e}`)}function jp(e){if(vs(e!==null,`Unknown LossOrMetricFn ${e}`),typeof e=="string")return e;{let t;for(const n of Object.keys(Yp))if(Yp[n]===e){t=n;break}if(t!==void 0)return t;for(const n of Object.keys(qp))if(qp[n]===e){t=n;break}return t!==void 0?t:e.name}}function N3(e){const t={Adagrad:()=>Uo.adagrad(.01),Adadelta:()=>Uo.adadelta(1,.95,an()),Adam:()=>Uo.adam(.001,.9,.999,an()),Adamax:()=>Uo.adamax(.002,.9,.999,an(),0),RMSProp:()=>Uo.rmsprop(.001,.9,0,an()),SGD:()=>Uo.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new j(`Unknown Optimizer ${e}`)}const jv=1*1024*1024;function Kv(e,t,n=!1){if(e==null||typeof e!="object"||Object.getPrototypeOf(e)!==Object.prototype||!Ww(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const s=JSON.stringify(e);s.length>jv&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${jv}.`)}}function Ww(e){if(e===null)return!0;if(typeof e=="object")if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if(typeof n!="string")return!1;if(!Ww(e[n]))return!1}return!0}else if(Array.isArray(e)){for(const t of e)if(!Ww(t))return!1;return!0}else return!1;else{const t=typeof e;return t==="string"||t==="number"||t==="boolean"}}function C3(e,t,n,s=console.log){const i=O3(e),o=["Layer (type)","Output shape","Param #"];i?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map(m=>Math.floor(t*m)));let a;if(!i){o.push("Receives inputs"),a=[];for(const m in e.nodesByDepth)a.push(...e.nodesByDepth[m])}s("_".repeat(t)),Kp(o,n,s),s("=".repeat(t));const c=e.layers;for(let m=0;m1||i.length===1&&i[0].inboundLayers.length>1){t=!1;break}s.push(...i)}if(t)for(const i of e.layers){let o=!1;for(const a of i.inboundNodes)if(s.indexOf(a)!==-1)if(o){t=!1;break}else o=!0;if(!t)break}return t}function Kp(e,t,n=console.log){let s="";for(let i=0;i0&&(s=s.slice(0,s.length-1)+" "),s+=e[i],s=s.slice(0,t[i]),s+=" ".repeat(t[i]-s.length);n(s)}function E3(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(c){s="multiple"}const i=e.name,o=e.getClassName(),a=[`${i} (${o})`,s,e.countParams().toString()];Kp(a,t,n)}function D3(e,t,n,s){let i;try{i=JSON.stringify(e.outputShape)}catch(m){i="multiple"}const o=[];for(const m of e.inboundNodes){if(n!=null&&n.length>0&&n.indexOf(m)===-1)continue;for(let y=0;yI.name),u=[],p=t.names();for(const I of c)p.indexOf(I)!==-1?u.push(t.getValue(I)):u.push(null);s!=null&&(s.maxNumTensors=-Infinity,s.minNumTensors=Infinity);const m=c.join(",")+"|"+t.names().join(",");let y,b;if(Uw[m]==null){const I=F3(a,t);y=I.sorted,b=I.recipientCounts,Uw[m]=y,Jv[m]=b}y=Uw[m],b={},i||Object.assign(b,Jv[m]);const w=new Go(t);for(let I=0;Is.maxNumTensors&&(s.maxNumTensors=q),q0,()=>"Expected at least one fetch, got none");let n=[],s={};if(e.length===1){const i=Zv(e[0],t);n=i.sorted,s=i.recipientMap}else{const i=new Set;for(const o of e){const{sorted:a,recipientMap:c}=Zv(o,t);for(const u of a)i.has(u.name)||(n.push(u),i.add(u.name));for(const u in c)s[u]==null&&(s[u]=new Set),c[u].forEach(p=>s[u].add(p))}}return{sorted:n,recipientCounts:_3(s)}}function _3(e){const t={};for(const n in e)t[n]=e[n].size;return t}function Zv(e,t){const n=new Set,s=[],i={};for(const c of t.names())n.add(c);const o=[],a=[];for(o.push(e);o.length>0;){const c=o[o.length-1];if(n.has(c.name)){o.pop();continue}const u=a[a.length-1]===o.length-1;if(c.inputs.length===0||u)o.pop(),s.push(c),n.add(c.name),u&&a.pop();else{a.push(o.length-1);for(const p of c.inputs){if(i[p.name]==null&&(i[p.name]=new Set),i[p.name].add(c.name),n.has(p.name))continue;o.push(p)}}}return{sorted:s,recipientMap:i}}function W3(e){let t;if(e.sourceLayer.inboundNodes.length===1)t=e.sourceLayer.output;else{let n=null;for(let s=0;sN.name)}`);Hr(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(N=>N.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const N of this.outputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;this.outputLayers.push(E),this.outputLayersNodeIndices.push(D),this.outputLayersTensorIndices.push(F)}for(const N of this.inputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;vs(D===0,"input layer has >1 nodes"),vs(F===0,"input layer has >1 tensors"),this.inputLayers.push(E),this.inputLayersNodeIndices.push(D),this.inputLayersTensorIndices.push(F)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let N=0;NN.shape),this.internalOutputShapes=this.outputs.map(N=>N.shape);const t={},n={},s={},i={},o={},a=[],c=(N,E,D,F,_,B)=>{(F==null||_==null||B==null)&&(F=N.sourceLayer,_=N.nodeIndex,B=N.tensorIndex);const U=F.inboundNodes[_];if(D.indexOf(U)!==-1)throw new oi(`The tensor ${N.name} at layer "${F.name}" is part of a cycle.`);if(E.indexOf(U)!==-1)return;this.containerNodes.add(Ui.nodeKey(F,_)),F.id in o||(o[F.id]=Object.keys(o).length),D.indexOf(U)===-1&&D.push(U);const Y=U.inboundLayers.length;for(let q=0;q=0;)D.splice(D.indexOf(U),1);a.push(U)},u=[],p=[];for(const N of this.outputs)c(N,u,p);const m=a.slice().reverse();for(const N of m){n[N.id]=N,N.id in t||(t[N.id]=0);let E=t[N.id];const D=s[N.outboundLayer.id]==null?0:s[N.outboundLayer.id];E=Math.max(E,D),s[N.outboundLayer.id]=E,i[N.outboundLayer.id]=N.outboundLayer,t[N.id]=E;for(let F=0;FparseInt(N,10)).sort(vp);this.layers=[];for(const N of w){const E=b[N];E.sort((D,F)=>{const _=o[D.id],B=o[F.id];return _B?1:0});for(const D of E)D instanceof Ui&&this.internalContainerRefs.push(D),this.layers.push(D)}this.layersByDepth=b,w=Object.keys(y).map(N=>parseInt(N,10)).sort(vp);const I=this.inputs.slice(),T=[];for(const N of w)for(const E of y[N]){const D=E.outboundLayer;if(D!=null){for(const F of E.inputTensors)if(I.indexOf(F)===-1)throw new oi(`Graph disconnected: cannot obtain value for tensor ${F} at layer "${D.name}". The following previous layers were accessed without issue: ${T}`);for(const F of E.outputTensors)I.push(F);T.push(D.name)}}this.nodesByDepth=y;const v=this.layers.map(N=>N.name);for(const N of v){const E=v.filter(D=>D===N).length;if(E!==1)throw new oi(`The name "${N}" is used ${E} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(v))}this.outboundNodes=[],this.inboundNodes=[],new Pp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(N=>null),outputMasks:this.outputs.map(N=>null),inputShapes:this.inputs.map(N=>N.shape),outputShapes:this.outputs.map(N=>N.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach(t=>{t._trainableWeights.forEach(n=>n.trainable=e)}),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new j("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const n of this.layers)t.push(...n.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const o of this.layers)for(const a of o.weights){if(n[a.originalName]!=null)throw new j(`Duplicate weight name: ${a.originalName}`);n[a.originalName]=a,s++}const i=[];for(const o in e){let a=o;if(n[o]==null){const c=o.split("/"),u=c.slice(0,-2).concat([c[c.length-1]]);a=u.join("/")}if(n[a]!=null)i.push([n[a],e[o]]);else if(t)throw new j(`Provided weight data has no target variable: ${o}`);delete n[a]}if(t){const o=[];for(const a in n)o.push(a);if(o.length>0)throw new j(`${o.length} of ${s} weights are not set: ${o}`)}Rw(i)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion=`tfjs-layers ${Xp}`,t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=$w(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return ee(()=>{e=Nt(e);const n=new Go;for(let s=0;s{e=Nt(e);let n;return t==null?n=Bo(null,e.length):n=Nt(t),this.runInternalGraph(e,n)[1]})}computeOutputShape(e){const t=Bp(e);if(t.length!==this.inputLayers.length)throw new j(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let a=0;aparseInt(a,10)).sort(vp);if(s.length>1)for(const a of s){const c=this.nodesByDepth[a];for(const u of c){const p=u.outboundLayer;if(this.inputLayers.map(I=>I.id).indexOf(p.id)!==-1)continue;const m=[];for(let I=0;IparseInt(c,10)).sort(vp);for(const c of s){const u=this.nodesByDepth[c];for(const p of u){const m=p.outboundLayer,y=p.inputTensors,b=p.outputTensors,w=new Array;for(const I of y)I.id in n&&w.push(n[I.id]);if(w.length===y.length){let I={},T,v,N,E;if(p.callArgs!=null&&(I=p.callArgs),w.length===1){const[D,F]=w[0];I.mask==null&&(I.mask=F),N=Nt(m.call(D,I)),E=Nt(m.computeMask(D,F)),T=[D],v=[F]}else 
T=w.map(D=>D[0]),v=w.map(D=>D[1]),I.mask==null&&(I.mask=v),N=Nt(m.call(T,I)),E=Nt(m.computeMask(T,v));if(m.activityRegularizer)throw new Ge("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let D=0;D{const e=[];for(const t of this.layers)for(let n=0;n0){const I=[];for(let T=0;T0&&T.apply(Jn(N),E)}function u(T){const v=T.name,N=hi(T,t.customObjects!=null?t.customObjects:{});N.setFastWeightInitDuringBuild(s),i[v]=N;const E=T.inboundNodes;E.forEach(D=>{if(!(D instanceof Array))throw new j(`Corrupted configuration, expected array for nodeData: ${D}`);a(N,D)})}const p=t.name,m=t.layers;for(const T of m)u(T);for(;!pz(o);)for(const T of m){const v=i[T.name];if(v.name in o){const N=o[v.name];delete o[v.name];for(const E of N)c(v,E)}}const y=[],b=[],w=t.inputLayers;for(const T of w){const v=T[0],N=T[1],E=T[2];vs(v in i);const D=i[v],F=D.inboundNodes[N].outputTensors;y.push(F[E])}const I=t.outputLayers;for(const T of I){const v=T[0],N=T[1],E=T[2];vs(v in i);const D=i[v],F=D.inboundNodes[N].outputTensors;b.push(F[E])}return new e({inputs:y,outputs:b,name:p})}get stateful(){if(this._stateful)throw new j("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){ee(()=>{this.layers.forEach(e=>{e.stateful&&e.resetStates()})})}}function Qv(e,t,n){const s=t.length;if(e==null||Array.isArray(e)&&e.length===0)return t.map(i=>null);if(s===1)return Array.isArray(e)&&e.length===1?e:typeof e=="object"&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}else if(typeof e=="object"&&Object.keys(e).length>0&&typeof e[Object.keys(e)[0]]=="object"){const i=[];return t.forEach(o=>{o in e?i.push(e[o]):i.push(null)}),i}else throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. Provided ${n} not understood: ${JSON.stringify(e)}`)}function eN(e,t){return Qv(e,t,"classWeight")}function Aee(e,t){return Qv(e,t,"sampleWeight")}async function tN(e,t,n,s){if(t!=null||s!=null)throw new Error("Support sampleWeight is not implemented yet");if(n!=null){const i=ee(()=>{if(e.shape.length===1)return e.clone();if(e.shape.length===2)if(e.shape[1]>1){const c=1;return e.argMax(c)}else{if(e.shape[1]===1)return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),o=Array.from(await i.data());qe(i);const a=[];return o.forEach(c=>{if(n[c]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${c} exists in the data but not in classWeight`);a.push(n[c])}),rs(a,"float32")}else return null}function $3(e,t){return X(e,t)}const U3=32;function nN(e,t){let n,s;const i=t;n=i.xs,s=i.ys,k(n!=null&&s!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. 
The provided Dataset instead generates ${t}`);const o=sN("input",e.inputNames,n),a=sN("output",e.outputNames,s),c=o[0].shape[0];k(o.length===e.inputs.length,()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${o.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`),k(a.length===e.outputs.length,()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${a.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`);for(let u=0;u`Batch size mismatch: input ${e.inputNames[u]} has ${o[u].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);for(let u=0;u`Batch size mismatch: output ${e.outputNames[u]} has ${a[u].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);return{xs:o,ys:a}}function sN(e,t,n){if(n instanceof Q)return[n];if(Array.isArray(n))return k(n.length===t.length,()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`),n;{const s=[];for(const i of t){if(n[i]==null)throw new j(`The feature data generated by the dataset lacks the required ${e} key '${i}'.`);s.push(n[i])}return s}}function B3(e){if(e.length===3)throw new Ge("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}async function M3(e,t,n){const s=n.batchesPerEpoch!=null;if(k(e.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),k(n!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),k(n.epochs!=null&&n.epochs>0&&Number.isInteger(n.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`),k(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`),k(n.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const i=n.validationData!=null;let o,a;if(i)if(iN(n.validationData))k(n.validationBatches==null||n.validationBatches>0&&Number.isInteger(n.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`);else{const v=B3(n.validationData);o=v.xs,a=v.ys}const c=e.makeTrainFunction(),u=e.getDedupedMetricsNames();let p;i?p=u.slice().concat(u.map(v=>"val_"+v)):p=u.slice();const m=Pv(n.callbacks,n.yieldEvery),y=n.verbose==null?1:n.verbose,{callbackList:b,history:w}=zv(m,y,n.epochs,null,null,P3(t,n),null,i,p);b.setModel(e),e.history=w,await b.onTrainBegin(),e.stopTraining_=!1;let I=n.initialEpoch==null?0:n.initialEpoch,T=await t.iterator();for(;I=n.batchesPerEpoch:D.done){if(i){let F;iN(n.validationData)?F=Nt(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):F=Nt(e.evaluate(o,a,{batchSize:n.validationBatchSize==null?U3:n.validationBatchSize,verbose:0}));for(let _=0;_0)throw new Ge("Verbose mode is not implemented yet.");k(!s||n.batches>0&&Number.isInteger(n.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`);const a=z3(t)?t:await t.iterator();let c=0,u=0;for(;s?u{if(p.value){const{xs:m,ys:y}=nN(e,p.value),b=m.concat(y),w=ee(()=>i(b));if(qe(b),u===0)for(let T=0;Tbe(o[T],X(I,v))),u>0&&qe(N)}qe(w),c+=I,++u}return o}),p.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let p=0;p0&&Number.isInteger(e),()=>`batchSize is required to be a positive integer, but got ${e}`)}function zh(e,t,n){return e==null?[null]:Array.isArray(e)?e.map(s=>zo(s,t,n-t)):zo(e,t,n-t)}function Mw(e,t){return ee(()=>e==null?null:Array.isArray(e)?e.map(n=>Mw(n,t)):Cv(e,t.dtype==="int32"?t:t.toInt()))}function Pw(e,t){const n=[];let s=0,i=null;for(;s=e&&(i=e),n.push([s,i]),s=i;return n}async function V3(e,t,n,s,i,o,a,c,u,p,m,y,b,w,I){i==null&&(i=32),o==null&&(o=1),m==null&&(m=!0),b==null&&(b=0);let T=!1;if(u!=null&&p!=null&&(T=!0),I!=null&&(T=!0,w==null))throw new j("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const v=e.checkNumSamples(n,i,w,"steps_per_epoch");let N;v!=null&&(N=ai(0,v)),a==null&&(a=1);const{callbackList:E,history:D}=zv(c,a,o,b,v,w,i,T,y);E.setModel(e),e.history=D,await E.onTrainBegin(),e.stopTraining_=!1;for(let F=b;F{const J=U[Y][0],oe=U[Y][1],ce=zo(B,J,oe-J);q.batch=Y,q.size=oe-J;const ue=Mw(n,ce),he=t(ue);for(let pe=0;pe0){if(I=!0,s.validationData.length===2)a=s.validationData[0],c=s.validationData[1];else throw s.validationData.length===3?new Ge("validationData including sample weights is not supported yet."):new j(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);const U=!0,Y=await e.standardizeUserData(a,c,null,null,U,y);u=Y[0],p=Y[1],T=u.concat(p)}else if(s.validationSplit!=null&&s.validationSplit>0&&s.validationSplit<1){I=!0;const U=Math.floor(i[0].shape[0]*(1-s.validationSplit)),Y=i[0].shape[0];u=zh(i,U,Y),i=zh(i,0,U),p=zh(o,U,Y),o=zh(o,0,U),T=u.concat(p)}else s.validationSteps!=null&&(I=!0);const v=i.concat(o).concat(m);e.checkTrainableWeightsConsistency();const N=e.makeTrainFunction(),E=e.getDedupedMetricsNames();let D,F;I?(e.makeTestFunction(),D=e.testFunction,F=E.slice().concat(E.map(U=>"val_"+U))):(D=null,T=[],F=E.slice());const _=Pv(s.callbacks,s.yieldEvery),B=await V3(e,N,v,E,y,s.epochs,s.verbose,_,D,T,s.shuffle,F,s.initialEpoch,null,null);return B}finally{e.isTraining=!1,Vo(i,t),Vo(o,n),Vo(u,a),Vo(p,c),m!=null&&qe(m)}}function rN(e){const t=[];e instanceof Q&&(e=[e]);for(let n=0;nn.push(i.id));else if(t!=null)for(const i in t){const o=t[i];n.push(o.id)}const s=[];if(e instanceof Q)n.indexOf(e.id)===-1&&s.push(e);else if(Array.isArray(e))e.forEach(i=>{n.indexOf(i.id)===-1&&s.push(i)});else if(e!=null)for(const i in e){const o=e[i];n.indexOf(o.id)===-1&&s.push(o)}s.forEach(i=>{i.isDisposed||i.dispose()})}function Y3(e){return e instanceof Q}function zw(e){return Array.isArray(e)}function oN(e){return!Y3(e)&&!zw(e)}function aN(e,t,n,s=!0,i=""){if(t==null||t.length===0){if(e!=null){let a=!1;if(zw(e)&&e.length>0)a=!0;else if(oN(e)){for(const c in e)if(e.hasOwnProperty(c)){a=!0;break}}else a=!0;if(a)throw new j(`Error when checking model ${i} expected no data, but got ${e}`)}return[]}if(e==null)return t.map(a=>null);let o;if(oN(e)){e=e,o=[];for(const a of t){if(e[a]==null)throw new j(`No data provided for "${a}". Need data for each key in: ${t}`);o.push(e[a])}}else if(zw(e)){if(e=e,e.length!==t.length)throw new j(`Error when checking model ${i}: the Array of Tensors that you are passing to your model is not the size the model expected. 
Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);o=e}else{if(e=e,t.length>1)throw new j(`The model ${i} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);o=[e]}if(o=rN(o),n!=null)for(let a=0;a=0&&p!==m)throw new j(`Error when checking ${i}: expected ${t[a]} to have shape [${n[a]}], but got array with shape [${c.shape}].`)}}return o}function q3(e,t,n){const s=Hr(e.map(o=>o.shape[0]));s.sort();const i=Hr(t.map(o=>o.shape[0]));if(i.sort(),s.length>1)throw new j(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map(o=>o.shape))}`);if(i.length>1)throw new j(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(o=>o.shape))}`);if(s.length>0&&i.length>0&&!ot(s,i))throw new j(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${i[0]} target sample(s).`)}function j3(e,t,n){const s=[ar,Vp,Bh];for(let i=0;i1)throw new j(`The model expects ${t.length} ${i} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);o=[e]}if(n!=null)for(let a=0;a[]);let n;if(typeof e=="string"||typeof e=="function")n=[e];else if(Array.isArray(e)||typeof e=="object")n=e;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);if(Array.isArray(n))return t.map(s=>n);{const s=[];for(const i of t){let o=n.hasOwnProperty(i)?n[i]:[];Array.isArray(o)||(o=[o]),s.push(o)}return s}}const X3="layers-model";class cr extends Ui{constructor(e){super(e);this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new j("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");C3(this,e,t,n)}compile(e){if(e.loss==null&&(e.loss=[]),this.loss=e.loss,typeof e.optimizer=="string")this.optimizer_=N3(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof sr))throw new j("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(!Array.isArray(e.loss)&&typeof e.loss!="string"&&typeof e.loss!="function"){e.loss=e.loss;for(const o in e.loss)if(this.outputNames.indexOf(o)===-1)throw new j(`Unknown entry in loss dictionary: "${o}". Only expected the following keys: ${this.outputNames}`);for(const o of this.outputNames)e.loss[o]==null&&console.warn(`Output "${o}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${o} during training`),t.push(Dw(e.loss[o]))}else if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new j(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const o=e.loss;t=o.map(a=>Dw(a))}else{const o=Dw(e.loss);this.outputs.forEach(a=>{t.push(o)})}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let o=0;o{for(let o=0;o1&&(this.metricsTensors.push([a,o]),this.metricsNames.push(this.outputNames[o]+"_loss"))}});const s=K3(e.metrics,this.outputNames),i=(o,a,c)=>{this.outputNames.length>1&&(a=this.outputNames[o]+"_"+a),this.metricsNames.push(a),this.metricsTensors.push([c,o])};Po("metric",()=>{for(let o=0;o{const p="";let m,y,b;for(const w of u){if(typeof w=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(w)!==-1){const T=this.internalOutputShapes[o];T[T.length-1]===1||this.lossFunctions[o]===Vp?["accuracy","acc"].indexOf(w)!==-1?y=kw:["crossentropy","ce"].indexOf(w)!==-1&&(y=Hv):this.lossFunctions[o]===Gp?["accuracy","acc"].indexOf(w)!==-1?y=Yv:["crossentropy","ce"].indexOf(w)!==-1&&(y=qv):["accuracy","acc"].indexOf(w)!==-1?y=Fw:["crossentropy","ce"].indexOf(w)!==-1&&(y=_w);let v;["accuracy","acc"].indexOf(w)!==-1?v="acc":["crossentropy","ce"].indexOf(w)!==-1&&(v="ce"),b=y,m=p+v}else{const T=v3(w);b=T,m=p+jp(w)}let I;Po(m,()=>{I=b}),i(o,m,I)}};c(a)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=n.batchSize==null?32:n.batchSize;Bw(s);const i=!0,o=this.standardizeUserDataXY(e,t,i,s);try{const a=o[0].concat(o[1]);this.makeTestFunction();const c=this.testFunction,u=this.testLoop(c,a,s,n.verbose,n.steps);return Jn(u)}finally{Vo(o[0],e),Vo(o[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),G3(this,e,t)}checkNumSamples(e,t,n,s="steps"){let i;if(n!=null){if(i=null,t!=null)throw new j(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else if(e!=null)Array.isArray(e)?i=e[0].shape[0]:i=e.shape[0];else throw new j(`Either the input data should have a defined shape, or ${s} shoud be specified.`);return i}execute(e,t){if(Array.isArray(t)&&t.length===0)throw new j("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],i=this.retrieveSymbolicTensors(s),o=new Go;if(e instanceof Q&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new j(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let c=0;ca.name);for(let a=0;a0){const s=[];throw t.forEach((i,o)=>{i==null&&s.push(e[o])}),new j(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return t}predictLoop(e,t=32,n=!1){return ee(()=>{const s=this.checkNumSamples(e);if(n)throw new Ge("Verbose predictLoop() is not implemented yet.");const i=Pw(s,t),o=this.outputs.map(a=>[]);for(let a=0;a{const u=i[a][0],p=i[a][1],m=zh(e,u,p),y=[];if(Array.isArray(m))for(let w=0;wo[p].push(u))}return Jn(o.map(a=>Mt(a,0)))})}predict(e,t={}){const n=rN(e);cN(n,this.inputNames,this.feedInputShapes,!1);try{const s=t.batchSize==null?32:t.batchSize;return Bw(s),this.predictLoop(n,s)}finally{Vo(n,e)}}predictOnBatch(e){cN(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return 
this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(this.optimizer_==null)throw new oi("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const i=[];for(let o=0;o0&&e[0].shape[0]%s!==0)throw new j(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,i=!0,o){const[a,c]=this.standardizeUserDataXY(e,t,i,o);if(n!=null)throw new Error("sample weight is not supported yet.");let u=null;if(s!=null){const p=eN(s,this.outputNames);u=[];for(let m=0;m{const o=this.checkNumSamples(t,n,i,"steps"),a=[];if(s>0)throw new Ge("Verbose mode is not implemented yet.");if(i!=null)throw new Ge("steps mode in testLoop() is not implemented yet");{const c=Pw(o,n),u=rs(ai(0,o));for(let p=0;p1){const o=gv(e.slice(0,n),s);i+=`_${o}`}t.push(i)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),o=[],a=()=>{const m=[];for(let I=0;I1&&I{w=be(w,I)}),w},c=this.collectedTrainableWeights.map(m=>m.read()),u=!0,p=this.optimizer_.minimize(a,u,c);return[p].concat(o)}}makeTestFunction(){this.testFunction=e=>ee(()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),i=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),o=[];for(let u=0;uor(t))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t)if(typeof n[s]=="string")e[s]=or(n[s]);else throw new Error("Serialization of non-string loss is not supported.")}return e}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[or(jp(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(e=>or(jp(e)));{const e={};for(const t in this.metrics)e[t]=or(jp(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(e.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(e.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(e.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");const t=Mh(e.optimizer_config),n=hi(t);let s;if(typeof e.loss=="string")s=Mo(e.loss);else if(Array.isArray(e.loss))s=e.loss.map(o=>Mo(o));else if(e.loss!=null){s={};for(const o in e.loss)s[o]=Mo(e.loss[o])}let i;if(Array.isArray(e.metrics))i=e.metrics.map(o=>Mo(o));else if(e.metrics!=null){i={};for(const o in e.metrics)i[o]=Mo(e.metrics[o])}this.compile({loss:s,metrics:i,optimizer:n})}async save(e,t){if(typeof e=="string"){const u=Uy(e);if(u.length===0)throw new j(`Cannot find any save handlers for URL '${e}'`);if(u.length>1)throw new j(`Found more than one (${u.length}) save handlers for URL '${e}'`);e=u[0]}if(e.save==null)throw new j("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await Wy(this.getNamedWeights(t)),s=!1,i=null,o=this.toJSON(i,s),a={modelTopology:o,format:X3,generatedBy:`TensorFlow.js tfjs-layers v${Xp}`,convertedBy:null},c=t==null?!1:t.includeOptimizer;if(c&&this.optimizer!=null){a.trainingConfig=this.getTrainingConfig();const u="optimizer",{data:p,specs:m}=await 
Wy(await this.optimizer.getWeights(),u);n.specs.push(...m),n.data=Od([n.data,p])}if(this.userDefinedMetadata!=null){const u=!0;Kv(this.userDefinedMetadata,this.name,u),a.userDefinedMetadata=this.userDefinedMetadata}return a.weightData=n.data,a.weightSpecs=n.specs,e.save(a)}setUserDefinedMetadata(e){Kv(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}cr.className="Model",ge(cr);class lN extends cr{}lN.className="Functional",ge(lN);async function J3(e,t){"modelTopology"in e||(e={modelTopology:e}),e=e;let n=e.modelTopology;n.model_config!=null&&(n=n.model_config);const s=Mh(n),i=hi(s,t);if(e.weightsManifest!=null){const o=await _T(e.weightsManifest,e.pathPrefix,i.weights.map(c=>c.originalName)),a={};for(const c of i.weights)a[c.originalName]=o[c.originalName];i.loadWeights(a),qe(o)}return i}async function Z3(e,t){if(t==null&&(t={}),typeof e=="string"){const n=By(e,t);if(n.length===0)n.push(kd(e,t));else if(n.length>1)throw new j(`Found more than one (${n.length}) load handlers for URL '${e}'`);e=n[0]}return Q3(e,void 0,t)}async function Q3(e,t,n){if(n==null&&(n={}),e.load==null)throw new j("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const s=await e.load();let i=s.modelTopology;i.model_config!=null&&(i=i.model_config);const o=n.strict==null?!0:n.strict,a=s.weightData!=null&&s.weightSpecs!=null&&o,c=hi(Mh(i),t,a),u=s.trainingConfig;if(u!=null&&c.loadTrainingConfig(u),s.userDefinedMetadata!=null&&c.setUserDefinedMetadata(s.userDefinedMetadata),s.weightData!=null){if(s.weightSpecs==null)throw new j("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:p,optimizerWeights:m}=eG(s.weightData,s.weightSpecs);c.loadWeights(p,o),c.optimizer!=null&&m.length>0&&await c.optimizer.setWeights(m),qe(p),qe(m.map(y=>y.tensor))}return c}function eG(e,t){const n=Rd(e,t),s={},i=[];return t.forEach(o=>{o.group==="optimizer"?i.push({name:o.name,tensor:n[o.name]}):s[o.name]=n[o.name]}),{modelWeights:s,optimizerWeights:i}}class ic extends cr{constructor(e){super({inputs:[],outputs:[]});if(e=e||{},this.trainable=!0,this.built=!1,this.name=e.name!=null?e.name:Up("sequential_"),e.layers!=null)for(const t of e.layers)this.add(t)}checkShape(e){const t=e.inboundNodes[0].outputTensors[0].shape;if(t.some(n=>n<0))throw new j(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof ic||e instanceof cr;let n;if(t){if(n=e,n.outputs.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(n.inputs.length!==1)throw new j("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(e.inboundNodes.length===0){if(e.batchInputShape==null)throw new j("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const s=_v({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(s)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(e.inboundNodes.length!==1)throw new j(`A layer added to a Sequential model must not already be connected somewhere else. 
LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(e.inboundNodes[0].outputTensors.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=Fv(this.outputs[0])}this.inboundNodes=[],new Pp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:Bo(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(s=>s.shape),outputShapes:this.outputs[0].shape})}else{const s=e.apply(this.outputs[0]);if(Array.isArray(s))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[s],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return this.model==null&&this.build(),this.model.call(e,t)}build(e){if(It(e),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new cr({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){this.model==null&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return this.model==null&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return this.model==null&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async 
fitDataset(e,t){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let i,o={};if(t instanceof Array){if(!(t[0].className!=null)||t[0].className==="Merge")throw new j("Legacy serialization format not supported yet.");i=t}else k(t.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),i=t.layers,delete t.layers,o=t;const a=new e(o);if(!(a instanceof ic))throw new Ge(`Sequential.fromConfig called on non-Sequential input: ${a}`);for(const c of i){const u=void 0,p=hi(c,u,s);s&&p.setFastWeightInitDuringBuild(!0),a.add(p)}return a}set stopTraining(e){if(this.model==null)throw new j("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(this.model==null)throw new j("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}ic.className="Sequential",ge(ic);function tG(e){return new cr(e)}function nG(e){return new ic(e)}function sG(e,t){return t==null&&(t={}),Z3(e,t)}function hN(e){return _v(e)}function iG(e,t){zs.registerCallbackConstructor(e,t)}class cs extends Ao{getConfig(){return{}}}class uN extends cs{apply(e,t=1){return Dz(e,t)}}uN.className="elu",ge(uN);class dN extends cs{apply(e){return rp(e)}}dN.className="selu",ge(dN);class pN extends cs{apply(e){return Fi(e)}}pN.className="relu",ge(pN);class mN extends cs{apply(e){return ee(()=>Eo(6,Fi(e)))}}mN.className="relu6",ge(mN);class fN extends cs{apply(e){return e}}fN.className="linear",ge(fN);class gN extends cs{apply(e){return Ei(e)}}gN.className="sigmoid",ge(gN);class yN extends cs{apply(e){return Fz(e)}}yN.className="hardSigmoid",ge(yN);class bN extends cs{apply(e){return za(e)}}bN.className="softplus",ge(bN);class wN extends cs{apply(e){return kz(e)}}wN.className="softsign",ge(wN);class LN extends cs{apply(e){return Ua(e)}}LN.className="tanh",ge(LN);class Gw extends cs{apply(e,t=-1){return Wo(e,t)}}Gw.className="softmax",ge(Gw);class SN extends cs{apply(e,t=-1){return Qd(e,t)}}SN.className="logSoftmax",ge(SN);class IN extends cs{apply(e,t=1){return ee(()=>Ei(e.mul(t)).mul(e))}}IN.className="swish",ge(IN);function Xr(e){return e.getClassName()}function Vw(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"activation")}function Jr(e){if(e==null){const t={};return t.className="linear",t.config={},Vw(t)}if(typeof e=="string"){const t={};return t.className=e,t.config={},Vw(t)}else return e instanceof cs?e:Vw(e)}function Hw(e){if(e!=null&&typeof e!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}class xN extends Ao{}class Gh extends xN{constructor(e){super();Hw(e),this.l1=e==null||e.l1==null?.01:e.l1,this.l2=e==null||e.l2==null?.01:e.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(e){return ee(()=>{let t=ct([1]);return this.hasL1&&(t=be(t,Ue(X(this.l1,sn(e))))),this.hasL2&&(t=be(t,Ue(X(this.l2,$h(e))))),t.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}Gh.className="L1L2",ge(Gh);function rG(e){return Hw(e),new Gh({l1:e!=null?e.l1:null,l2:0})}function oG(e){return Hw(e),new 
Gh({l2:e!=null?e.l2:null,l1:0})}const TN={l1l2:"L1L2"};function xt(e){return cw(e)}function AN(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"regularizer")}function _t(e){if(e==null)return null;if(typeof e=="string"){const t=e in TN?TN[e]:e,n={className:t,config:{}};return AN(n)}else return e instanceof xN?e:AN(e)}class Yw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null&&(this.maxValue=e.maxValue)}call(e,t){e=Xe(e);let n=Fi(e);return this.maxValue!=null&&(n=jn(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}Yw.className="ReLU",ge(Yw);class qw extends lt{constructor(e){super(e==null?{}:e);this.DEFAULT_ALPHA=.3,e==null&&(e={}),this.alpha=e.alpha==null?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=Xe(e);return Xd(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}qw.className="LeakyReLU",ge(qw);class jw extends lt{constructor(e){super(e==null?{}:e);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",e==null&&(e={}),this.supportsMasking=!0,this.alphaInitializer=Ft(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=_t(e.alphaRegularizer),this.alphaConstraint=ln(e.alphaConstraint),e.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else if(typeof e.sharedAxes=="number")this.sharedAxes=[e.sharedAxes];else throw new j(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`)}build(e){e=It(e);const t=e.slice(1);if(this.sharedAxes!=null)for(const s of this.sharedAxes)t[s-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(this.sharedAxes!=null)for(let s=1;s(Gt(t),t==="channelsFirst"?Pe(e,[0,2,3,1]):e))}function vN(e,t){return ee(()=>(Gt(t),t==="channelsFirst"?Pe(e,[0,2,3,4,1]):e))}function NN(e,t,n,s=1,i="valid",o,a=1){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.shape.length!==3)throw new j(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(t.shape.length!==3)throw new j(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(n!=null&&n.shape.length!==1)throw new j(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(o==="channelsFirst"&&(e=Pe(e,[0,2,1])),i==="causal")throw new Ge("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let c=Hd(e,t,s,i==="same"?"same":"valid","NWC",a);return n!=null&&(c=$i(c,n)),c})}function vee(e,t,n=1,s="valid",i,o=1){return ee(()=>(Gt(i),NN(e,t,null,n,s,i,o)))}function Nee(e,t,n=[1,1],s="valid",i,o){return ee(()=>(Gt(i),Qw(e,t,null,n,s,i,o)))}function Qw(e,t,n,s=[1,1],i="valid",o,a,c=null){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.rank!==3&&e.rank!==4)throw new j(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(t.rank!==3&&t.rank!==4)throw new j(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let u=Zw(e,o);if(i==="causal")throw new Ge("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return u=Hb({x:u,filter:t,strides:s,pad:i==="same"?"same":"valid",dilations:a,dataFormat:"NHWC",bias:n,activation:c}),o==="channelsFirst"&&(u=Pe(u,[0,3,1,2])),u})}function Cee(e,t,n=[1,1,1],s="valid",i,o){return 
ee(()=>(Gt(i),CN(e,t,null,n,s,i,o)))}function CN(e,t,n,s=[1,1,1],i="valid",o,a){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.rank!==4&&e.rank!==5)throw new j(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(t.rank!==4&&t.rank!==5)throw new j(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let c=vN(e,o);if(i==="causal")throw new Ge("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return c=yb(c,t,s,i==="same"?"same":"valid","NDHWC",a),n!=null&&(c=$i(c,n)),o==="channelsFirst"&&(c=Pe(c,[0,4,1,2,3])),c})}class eL extends lt{constructor(e,t){super(t);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",eL.verifyArgs(t),this.rank=e,mn(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new Ge(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=rc(t.kernelSize,e,"kernelSize"),this.strides=rc(t.strides==null?1:t.strides,e,"strides"),this.padding=t.padding==null?"valid":t.padding,Ns(this.padding),this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,Gt(this.dataFormat),this.activation=Jr(t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.biasInitializer=Ft(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=ln(t.biasConstraint),this.biasRegularizer=_t(t.biasRegularizer),this.activityRegularizer=_t(t.activityRegularizer),this.dilationRate=rc(t.dilationRate==null?1:t.dilationRate,e,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new j(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new j(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new j(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(e){if(vs("kernelSize"in e,"required key 'kernelSize' not in config"),typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,3))throw new j(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:Xr(this.activation),useBias:this.useBias,biasInitializer:Vt(this.biasInitializer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),biasConstraint:cn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Vh extends eL{constructor(e,t){super(e,t);this.kernel=null,Vh.verifyArgs(t),this.filters=t.filters,mn(this.filters,"filters"),this.kernelInitializer=Ft(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=ln(t.kernelConstraint),this.kernelRegularizer=_t(t.kernelRegularizer)}build(e){e=It(e);const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j(`The channel dimension of the input should be 
defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return ee(()=>{e=Xe(e);let n;const s=this.bias==null?null:this.bias.read(),i=bv(this.activation.getClassName());if(i!=null&&this.rank===2)n=Qw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate,i);else{if(this.rank===1)n=NN(e,this.kernel.read(),s,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)n=Qw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)n=CN(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new Ge("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(n=this.activation.apply(n))}return n})}computeOutputShape(e){e=It(e);const t=[],n=this.dataFormat==="channelsLast"?e.slice(1,e.length-1):e.slice(2);for(let i=0;i 0 but got ${JSON.stringify(e.filters)}`)}}class Hh extends Vh{constructor(e){super(2,e);Hh.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,2))throw new j(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Hh.className="Conv2D",ge(Hh);class Zp extends Vh{constructor(e){super(3,e);Zp.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!(Array.isArray(e.kernelSize)&&(e.kernelSize.length===1||e.kernelSize.length===3)))throw new j(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}Zp.className="Conv3D",ge(Zp);class tL extends Hh{constructor(e){super(e);if(this.inputSpec=[new fn({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new j(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(e=It(e),e.length!==4)throw new j("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new fn({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return ee(()=>{let n=Xe(e);if(n.shape.length!==4)throw new j(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);const s=n.shape,i=s[0];let o,a;this.dataFormat==="channelsFirst"?(o=2,a=3):(o=1,a=2);const c=s[o],u=s[a],p=this.kernelSize[0],m=this.kernelSize[1],y=this.strides[0],b=this.strides[1],w=Jp(c,y,p,this.padding),I=Jp(u,b,m,this.padding),T=[i,w,I,this.filters];this.dataFormat!=="channelsLast"&&(n=Pe(n,[0,2,3,1]));let v=Yd(n,this.kernel.read(),T,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(v=Pe(v,[0,3,1,2])),this.bias!=null&&(v=$i(v,this.bias.read(),this.dataFormat)),this.activation!=null&&(v=this.activation.apply(v)),v})}computeOutputShape(e){e=It(e);const t=e.slice();let n,s,i;this.dataFormat==="channelsFirst"?(n=1,s=2,i=3):(n=3,s=1,i=2);const o=this.kernelSize[0],a=this.kernelSize[1],c=this.strides[0],u=this.strides[1];return t[n]=this.filters,t[s]=Jp(t[s],c,o,this.padding),t[i]=Jp(t[i],u,a,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}tL.className="Conv2DTranspose",ge(tL);class RN extends Vh{constructor(e,t){super(e,t);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,t.filters==null)throw new j("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(t.kernelInitializer!=null||t.kernelRegularizer!=null||t.kernelConstraint!=null)throw new j("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(t.padding!=null&&t.padding!=="same"&&t.padding!=="valid")throw new j(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=_t(t.depthwiseRegularizer),this.depthwiseConstraint=ln(t.depthwiseConstraint),this.pointwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=_t(t.pointwiseRegularizer),this.pointwiseConstraint=ln(t.pointwiseConstraint)}build(e){if(e=It(e),e.length{e=Xe(e);let n;if(this.rank===1)throw new Ge("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(e=Pe(e,[0,2,3,1])),n=Fb(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(n=$i(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),this.dataFormat==="channelsFirst"&&(n=Pe(n,[0,3,1,2])),n})}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.pointwiseInitializer=Vt(this.pointwiseInitializer),e.depthwiseRegularizer=xt(this.depthwiseRegularizer),e.pointwiseRegularizer=xt(this.pointwiseRegularizer),e.depthwiseConstraint=cn(this.depthwiseConstraint),e.pointwiseConstraint=cn(this.pointwiseConstraint),e}}RN.className="SeparableConv";class nL extends RN{constructor(e){super(2,e)}}nL.className="SeparableConv2D",ge(nL);class Qp extends Vh{constructor(e){super(1,e);Qp.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,1))throw new j(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}Qp.className="Conv1D",ge(Qp);class sL extends lt{constructor(e){super(e);typeof e.cropping=="number"?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:typeof e.cropping[0]=="number"?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=e.dataFormat===void 0?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return this.dataFormat==="channelsFirst"?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return ee(()=>{if(e=Xe(e),this.dataFormat==="channelsLast"){const n=Cp(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return Cp(n,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{const n=Cp(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return Cp(n,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}sL.className="Cropping2D",ge(sL);class iL extends 
lt{constructor(e){super(e);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=e.size==null?this.DEFAULT_SIZE:e.size,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat}computeOutputShape(e){if(this.dataFormat==="channelsFirst"){const t=e[2]==null?null:this.size[0]*e[2],n=e[3]==null?null:this.size[1]*e[3];return[e[0],e[1],t,n]}else{const t=e[1]==null?null:this.size[0]*e[1],n=e[2]==null?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return ee(()=>{let n=Xe(e);const s=n.shape;if(this.dataFormat==="channelsFirst"){n=Pe(n,[0,2,3,1]);const i=this.size[0]*s[2],o=this.size[1]*s[3],a=n.resizeNearestNeighbor([i,o]);return Pe(a,[0,3,1,2])}else{const i=this.size[0]*s[1],o=this.size[1]*s[2];return n.resizeNearestNeighbor([i,o])}})}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}iL.className="UpSampling2D",ge(iL);function aG(e,t,n=[1,1],s="valid",i,o){return ee(()=>{i==null&&(i=ri()),Gt(i);let a=Zw(e,i);if(e.rank!==4)throw new j(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(t.rank!==4)throw new j(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return a=Co(a,t,n,s==="same"?"same":"valid","NHWC",o),i==="channelsFirst"&&(a=Pe(a,[0,3,1,2])),a})}class rL extends eL{constructor(e){super(2,e);this.depthwiseKernel=null,this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=Ft(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=ln(e.depthwiseConstraint),this.depthwiseRegularizer=_t(e.depthwiseRegularizer)}build(e){if(e=It(e),e.length<4)throw new j(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(e)}.`);const t=this.dataFormat==="channelsFirst"?1:3;if(e[t]==null||e[t]<0)throw new j(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{e=Xe(e);let n=aG(e,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(n=$i(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),n})}computeOutputShape(e){e=It(e);const t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,i=ui(t,this.kernelSize[0],this.padding,this.strides[0]),o=ui(n,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[e[0],s,i,o]:[e[0],i,o,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.depthwiseRegularizer=xt(this.depthwiseRegularizer),e.depthwiseConstraint=cn(this.depthwiseRegularizer),e}}rL.className="DepthwiseConv2D",ge(rL);function ON(e,t,n,s){if(Array.isArray(e)){if(t!=null||n!=null)throw new j("When inputs is an array, neither initialState or constants should be provided");s!=null&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function i(o){return 
o==null||Array.isArray(o)?o:[o]}return t=i(t),n=i(n),{inputs:e,initialState:t,constants:n}}function EN(e,t,n,s=!1,i,o,a=!1,c=!1){return ee(()=>{const u=t.shape.length;if(u<3)throw new j(`Input should be at least 3D, but is ${u}D.`);const p=[1,0].concat(ai(2,u));if(t=Pe(t,p),o!=null)throw new Ge("The rnn() functoin of the deeplearn.js backend does not support constants yet.");a&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),i!=null&&(i=i.asType("bool").asType("float32"),i.rank===u-1&&(i=Kn(i,-1)),i=Pe(i,p)),s&&(t=As(t,0),i!=null&&(i=As(i,0)));const m=[];let y,b=n;const w=t.shape[0],I=_i(t);let T;i!=null&&(T=_i(i));for(let N=0;Ne(E,b));if(i==null)y=D[0],b=D[1];else{const F=ee(()=>{const _=T[N],B=Dn(_).sub(_),U=D[0].mul(_).add(b[0].mul(B)),Y=b.map((q,J)=>D[1][J].mul(_).add(q.mul(B)));return{output:U,newStates:Y}});y=F.output,b=F.newStates}c&&m.push(y)}let v;if(c){const N=1;v=as(m,N)}return[y,v,b]})}class Bi extends lt{constructor(e){super(e);let t;if(e.cell==null)throw new j("cell property is missing for the constructor of RNN.");if(Array.isArray(e.cell)?t=new nm({cells:e.cell}):t=e.cell,t.stateSize==null)throw new j("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=t,this.returnSequences=e.returnSequences==null?!1:e.returnSequences,this.returnState=e.returnState==null?!1:e.returnState,this.goBackwards=e.goBackwards==null?!1:e.goBackwards,this._stateful=e.stateful==null?!1:e.stateful,this.unroll=e.unroll==null?!1:e.unroll,this.supportsMasking=!0,this.inputSpec=[new fn({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return ai(0,e).map(t=>null)}else return this.states_}setStates(e){this.states_=e}computeOutputShape(e){Nw(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(this.returnSequences?s=[e[0],e[1],n]:s=[e[0],n],this.returnState){const i=[];for(const o of t)i.push([e[0],o]);return[s].concat(i)}else return s}computeMask(e,t){return ee(()=>{Array.isArray(t)&&(t=t[0]);const n=this.returnSequences?t:null;if(this.returnState){const s=this.states.map(i=>null);return[n].concat(s)}else return n})}get states(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;na.shape[a.shape.length-1]),o))throw new j(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=o.map(a=>new fn({shape:[null,a]}));this.stateful&&this.resetStates()}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new rr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(n==null)throw new j("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_=[ct([n,this.cell.stateSize])];else if(e==null)qe(this.states_),this.keptStates!=null&&(qe(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_[0]=ct([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t===!0?this.keptStates.push(this.states_.slice()):qe(this.states_);for(let s=0;sRn(s.clone()))})}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=ON(e,n,s,this.numConstants);e=i.inputs,n=i.initialState,s=i.constants;let o=[],a=[];if(n!=null){t.initialState=n,o=o.concat(n),this.stateSpec=[];for(const u of n)this.stateSpec.push(new fn({shape:u.shape}));a=a.concat(this.stateSpec)}s!=null&&(t.constants=s,o=o.concat(s),this.numConstants=s.length);const c=o[0]instanceof li;if(c){const u=[e].concat(o),p=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=p;const y=super.apply(u,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t==null?null:t.mask,s=t==null?null:t.training;let i=t==null?null:t.initialState;e=Xe(e),i==null&&(this.stateful?i=this.states_:i=this.getInitialState(e));const o=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(i.length!==o)throw new j(`RNN Layer has ${o} state(s) but was passed ${i.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const a={training:s},c=(w,I)=>{const T=this.cell.call([w].concat(I),a);return[T[0],T.slice(1)]},u=EN(c,e,i,this.goBackwards,n,null,this.unroll,this.returnSequences),p=u[0],m=u[1],y=u[2];this.stateful&&this.resetStates(y,s);const b=this.returnSequences?m:p;return this.returnState?[b].concat(y):b})}getInitialState(e){return ee(()=>{let t=ct(e.shape);return t=Ue(t,[1,2]),t=Wh(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(n=>n>1?bw(t,[1,n]):t):this.cell.stateSize>1?[bw(t,[1,this.cell.stateSize])]:[t]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Bi.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=t.cell,i=hi(s,n);return new e(Object.assign(t,{cell:i}))}}Bi.className="RNN",ge(Bi);class oc extends lt{}class em extends 
oc{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation==null?this.DEFAULT_ACTIVATION:e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=ec([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=ec([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=It(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=t.training==null?!1:t.training;0Dn(e),rate:this.dropout,training:s})),0Dn(n),rate:this.recurrentDropout,training:s}));let i;const o=this.dropoutMask,a=this.recurrentDropoutMask;o!=null?i=Wi(X(e,o),this.kernel.read()):i=Wi(e,this.kernel.read()),this.bias!=null&&(i=$i(i,this.bias.read())),a!=null&&(n=X(n,a));let c=be(i,Wi(n,this.recurrentKernel.read()));return this.activation!=null&&(c=this.activation.apply(c)),[c,c]})}getConfig(){const e=super.getConfig(),t={units:this.units,activation:Xr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),recurrentInitializer:Vt(this.recurrentInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:xt(this.kernelRegularizer),recurrentRegularizer:xt(this.recurrentRegularizer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),kernelConstraint:cn(this.kernelConstraint),recurrentConstraint:cn(this.recurrentConstraint),biasConstraint:cn(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}em.className="SimpleRNNCell",ge(em);class oL extends Bi{constructor(e){e.cell=new em(e),super(e)}call(e,t){return ee(()=>{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return new e(t)}}oL.className="SimpleRNN",ge(oL);class tm extends 
oc{constructor(e){super(e);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new j("GRUCell does not support reset_after parameter set to true.");this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Jr(e.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=ec([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=ec([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=It(e);const t=e[e.length-1];this.kernel=this.addWeight("kernel",[t,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training==null?!1:t.training;let s=e[1];e=e[0],0Dn(e),rate:this.dropout,training:n,count:3})),0Dn(s),rate:this.recurrentDropout,training:n,count:3}));const i=this.dropoutMask,o=this.recurrentDropoutMask;let a,c,u;0{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}aL.className="GRU",ge(aL);class Yh extends oc{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Jr(e.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=ec([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=ec([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;e=It(e);const n=e[e.length-1];this.kernel=this.addWeight("kernel",[n,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let s;if(this.useBias){if(this.unitForgetBias){const i=this.biasInitializer,o=this.units;s=new(t=class extends Ps{apply(c,u){const p=i.apply([o]),m=new Op().apply([o]),y=i.apply([o*2]);return Nv(Nv(p,m),y)}},t.className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,s,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training;if(e=e,e.length!==3)throw new j(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const i=e[2];e=e[0],0Dn(e),rate:this.dropout,training:n,count:4})),0Dn(s),rate:this.recurrentDropout,training:n,count:4}));const o=this.dropoutMask,a=this.recurrentDropoutMask;let c,u,p,m;0{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}cL.className="LSTM",ge(cL);class nm extends oc{constructor(e){super(e);this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return ee(()=>{e=e;let n=e.slice(1);const s=[];for(const a of this.cells.slice().reverse())Array.isArray(a.stateSize)?s.push(n.splice(0,a.stateSize.length)):s.push(n.splice(0,1));s.reverse();const i=[];let o;for(let a=0;a{Po(`RNNCell_${s}`,()=>{n.build(e),Array.isArray(n.stateSize)?t=n.stateSize[0]:t=n.stateSize,e=[e[0],t]})}),this.built=!0}getConfig(){const e=super.getConfig(),t=i=>({className:i.getClassName(),config:i.getConfig()}),n=this.cells.map(t),s={cells:n};return Object.assign({},e,s)}static fromConfig(e,t,n={}){const s=[];for(const i of t.cells)s.push(hi(i,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const 
t=[];for(const n of this.cells)t.push(...n.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return Cw(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,i=e.splice(s);for(let o=0;oRv(t(),n),a=()=>Uh(o,t,s);if(!i||i<=1)return Rn(a().clone());const c=Array(i).fill(void 0).map(a);return c.map(u=>Rn(u.clone()))}var cG=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(e!=null&&typeof Object.getOwnPropertySymbols=="function")for(var i=0,s=Object.getOwnPropertySymbols(e);i{if(this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new j("ConvRNN2D cell does not support constants");const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return ee(()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=ct(i);return Array.isArray(t)?Array(t.length).fill(o):[o]})}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new rr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=n[0];if(o==null)throw new j("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_=[ct(i)];else if(e==null)qe(this.states_),this.keptStates!=null&&(qe(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_[0]=ct(i);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):qe(this.states_);for(let a=0;aRn(a.clone()))})}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:i,strides:o,dilationRate:a}=this.cell,c=t==="channelsFirst",u=e[c?3:2],p=e[c?4:3],m=ui(u,s[0],i,o[0],a[0]),y=ui(p,s[1],i,o[1],a[1]),b=[...e.slice(0,2),...c?[n,m,y]:[m,y,n]];return b}}DN.className="ConvRNN2D";class sm extends Yh{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:i,dataFormat:o,dilationRate:a}=e;super(Object.assign({},e,{units:t}));this.filters=t,mn(this.filters,"filters"),this.kernelSize=rc(n,2,"kernelSize"),this.kernelSize.forEach(c=>mn(c,"kernelSize")),this.strides=rc(s||1,2,"strides"),this.strides.forEach(c=>mn(c,"strides")),this.padding=i||"valid",Ns(this.padding),this.dataFormat=o||"channelsLast",Gt(this.dataFormat),this.dilationRate=rc(a||1,2,"dilationRate"),this.dilationRate.forEach(c=>mn(c,"dilationRate"))}build(e){var t;e=It(e);const n=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[n]==null)throw new j(`The channel dimension of the input should be defined. Found ${e[n]}`);const s=e[n],i=4,o=this.kernelSize.concat([s,this.filters*i]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const a=this.kernelSize.concat([this.filters,this.filters*i]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",a,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let c;if(this.unitForgetBias){const u=this.biasInitializer,p=this.filters;c=new(t=class extends Ps{apply(y,b){const w=u.apply([p]),I=si([p]),T=u.apply([p*2]);return yw([w,I,T])}},t.className="CustomInit",t)}else c=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*i],null,c,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return ee(()=>{if(e.length!==3)throw new j(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],i=e[1],o=e[2],a=4;0Dn(s),rate:this.dropout,training:n,count:a}));const c=this.dropoutMask,u=(Ie,Se,Ee)=>!Se||!Se[Ee]?Ie:X(Se[Ee],Ie);let p=u(s,c,0),m=u(s,c,1),y=u(s,c,2),b=u(s,c,3);0Dn(i),rate:this.recurrentDropout,training:n,count:a}));const w=this.recurrentDropoutMask;let I=u(i,w,0),T=u(i,w,1),v=u(i,w,2),N=u(i,w,3);const E=3,[D,F,_,B]=os(this.kernel.read(),a,E),[U,Y,q,J]=this.useBias?os(this.bias.read(),a):[null,null,null,null];p=this.inputConv(p,D,U,this.padding),m=this.inputConv(m,F,Y,this.padding),y=this.inputConv(y,_,q,this.padding),b=this.inputConv(b,B,J,this.padding);const[oe,ce,ue,he]=os(this.recurrentKernel.read(),a,E);I=this.recurrentConv(I,oe),T=this.recurrentConv(T,ce),v=this.recurrentConv(v,ue),N=this.recurrentConv(N,he);const pe=this.recurrentActivation.apply(be(p,I)),le=this.recurrentActivation.apply(be(m,T)),ye=be(X(le,o),X(pe,this.activation.apply(be(y,v)))),me=X(this.recurrentActivation.apply(be(b,N)),this.activation.apply(ye));return[me,me,ye]})}getConfig(){const e=super.getConfig(),{units:t}=e,n=cG(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const i=er(e,t,this.strides,s||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return n?$i(i,n,this.dataFormat):i}recurrentConv(e,t){const n=1;return 
er(e,t,n,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}sm.className="ConvLSTM2DCell",ge(sm);class lL extends DN{constructor(e){const t=new sm(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}lL.className="ConvLSTM2D",ge(lL);class im extends lt{constructor(e){super(e);this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(this.noiseShape==null)return this.noiseShape;const t=e.shape,n=[];for(let s=0;s{this.invokeCallHook(e,t);const n=Xe(e);if(0Rv(n,this.rate,i,this.seed),()=>n,s);return o}return e})}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}im.className="Dropout",ge(im);class hL extends im{constructor(e){super(e);this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}hL.className="SpatialDropout1D",ge(hL);class uL extends lt{constructor(e){super(e);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.batchInputShape==null&&e.inputShape==null&&e.inputDim!=null){let t=null;e.batchSize!=null&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation),e.useBias!=null&&(this.useBias=e.useBias),this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=ln(e.kernelConstraint),this.biasConstraint=ln(e.biasConstraint),this.kernelRegularizer=_t(e.kernelRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){e=It(e);const t=e[e.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){e=It(e);const t=e.slice();return t[t.length-1]=this.units,t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=bv(this.activation.getClassName());let i;return s!=null?i=Wi(n,this.kernel.read(),s,this.bias?this.bias.read():null):(i=Wi(n,this.kernel.read()),this.bias!=null&&(i=$i(i,this.bias.read())),this.activation!=null&&(i=this.activation.apply(i))),i})}getConfig(){const e={units:this.units,activation:Xr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:xt(this.kernelRegularizer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),kernelConstraint:cn(this.kernelConstraint),biasConstraint:cn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}uL.className="Dense",ge(uL);class dL extends lt{constructor(e){e=e||{},super(e),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=It(e);for(const t of e.slice(1))if(t==null)throw new j(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],Yr(e,1)]}call(e,t){return ee(()=>{this.invokeCallHook(e,t);let n=Xe(e);if(this.dataFormat==="channelsFirst"&&n.rank>1){const s=[0];for(let i=2;i{this.invokeCallHook(e,t);const n=Xe(e);return this.activation.apply(n)})}getConfig(){const e={activation:Xr(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}pL.className="Activation",ge(pL);class mL extends lt{constructor(e){super(e);this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return ee(()=>(e=Xe(e),Rz(e,this.n)))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}mL.className="RepeatVector",ge(mL);class fL extends lt{constructor(e){super(e);this.targetShape=e.targetShape;for(let t=0;t{this.invokeCallHook(e,t);const n=Xe(e),s=n.shape,i=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(i)})}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}fL.className="Reshape",ge(fL);class gL extends lt{constructor(e){super(e);if(e.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=ai(1,e.dims.length+1);if(!ot(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new fn({ndim:this.dims.length+1})]}computeOutputShape(e){e=It(e);const t=e.slice();return this.dims.forEach((n,s)=>{t[s+1]=e[n]}),t}call(e,t){return Pe(Xe(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}gL.className="Permute",ge(gL);class yL extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null?this.maskValue=e.maskValue==null?0:e.maskValue:this.maskValue=0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=Xe(e),s=-1;return th(Pr(n,this.maskValue),s)}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=-1,i=!0,o=th(Pr(n,this.maskValue),s,i),a=n.mul(o.asType(n.dtype));return a})}}yL.className="Masking",ge(yL);class bL extends lt{constructor(e){super(e);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",e.batchInputShape==null&&e.inputShape==null){let t=null;e.batchSize!=null&&(t=e.batchSize),e.inputLength==null?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Nt(e.inputLength))}this.inputDim=e.inputDim,mn(this.inputDim,"inputDim"),this.outputDim=e.outputDim,mn(this.outputDim,"outputDim"),this.embeddingsInitializer=Ft(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=_t(e.embeddingsRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.embeddingsConstraint=ln(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return 
ee(()=>this.maskZero?(e=Xe(e),Pr(e,et(e))):null)}computeOutputShape(e){if(e=It(e),this.inputLength==null)return[...e,this.outputDim];const t=Nt(this.inputLength);if(t.length!==e.length-1)throw new j(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s{this.invokeCallHook(e,t);let n=Xe(e);n.dtype!=="int32"&&(n=_h(n,"int32"));const s=Cv(this.embeddings.read(),n.as1D());return s.reshape(It(this.computeOutputShape(n.shape)))})}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Vt(this.embeddingsInitializer),embeddingsRegularizer:xt(this.embeddingsRegularizer),activityRegularizer:xt(this.activityRegularizer),embeddingsConstraint:cn(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}bL.className="Embedding",ge(bL);class Ho extends lt{constructor(e){super(e||{});this.supportsMasking=!0}mergeFunction(e){throw new Ge}computeElementwiseOpOutputShape(e,t){if(e==null||t==null)return null;if(e.length1)throw new j(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(e)}.`);let n=e[0]==null?null:e[0].slice(1);for(let i=1;ii.length);e.indexOf(null)===-1&&Hr(s).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return ee(()=>{if(e=e,this.reshapeRequired){const n=[],s=e.map(i=>i.rank);if(s.indexOf(null)===-1){const i=qr(s);for(let o of e){const a=o.rank;for(let c=0;c1){const p=ai(1,u).concat([0]);n.push(Pe(c,p)),i=!0}else n.push(c)}let o=this.mergeFunction(n);const a=o.rank;if(i){if(a==null){const c=o.shape,u=c.length,p=c[u-1],m=[p].concat(c.slice(0,c.length-1));o=Pe(o.reshape([-1,p]),[1,0]).reshape(m)}else if(a>1){const c=[a-1].concat(ai(0,a-1));o=Pe(o,c)}}return o}}else return this.mergeFunction(e)})}computeOutputShape(e){e=e;let t;e[0]==null?t=null:t=e[0].slice(1);for(let s=1;s{if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an Array");if(!Array.isArray(e))throw new j("`inputs` should be an Array");if(t.length!==e.length)throw new j(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every(s=>s==null))return null;t=t.map(s=>s==null?s:Kn(s,0));let n=t[0];for(let s=1;s{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0];for(let n=1;n{let t=e[0];for(let n=1;n1)throw new j("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return ee(()=>yw(e,this.axis))}computeOutputShape(e){if(!(Array.isArray(e)&&Array.isArray(e[0])))throw new j("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const i of t.slice(1)){if(n[s]==null||i[s]==null){n[s]=null;break}n[s]+=i[s]}return n}computeMask(e,t){if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new j("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new j(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return ee(()=>{let n=!0;if(t.forEach(o=>{if(o!=null){n=!1;return}}),n)return null;const s=[];for(let o=0;o3||t.shape.length>3)throw new Ge("batchDot is not implemented for tensors of 4D or higher rank yet");if(k(e.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`),k(e.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof n=="number"&&(n=[n,n]),e.dtype==="complex64"||t.dtype==="complex64")throw new Ge("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,i=t.shape.length;n==null&&(n=[s-1,i-2]);const o=n;return ee(()=>{let a;if(s>i){a=s-i;const u=[];for(let p=0;ps){a=i-s;const u=[];for(let p=0;p0){let u;s>i?u=s+i-3:u=s-1;const p=[];for(let m=u;m"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new Ge("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new j(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(e.length!==2)throw new j(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t=e[0],n=e[1],s;return Array.isArray(this.axes)?s=this.axes.map((i,o)=>Qh(i,e[o].shape.length)):s=[Qh(this.axes,t.shape.length),Qh(this.axes,n.shape.length)],this.normalize&&(t=zp(t,s[0]),n=zp(n,s[1])),lG(t,n,s)}interpretAxes(e,t){let n;return Array.isArray(this.axes)?n=this.axes:n=[Qh(this.axes,e.length),Qh(this.axes,t.length)],n}computeOutputShape(e){k(Array.isArray(e)&&e.length===2&&Array.isArray(e[0])&&Array.isArray(e[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new Ge("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const i=t.concat(n);return i.length===1&&i.push(1),i}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}wL.className="Dot",ge(wL);class LL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=()=>Rp(n.shape,0,this.stddev).add(n),i=Uh(s,()=>n,t.training||!1);return i})}}LL.className="GaussianNoise",ge(LL);class SL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e);if(this.rate>0&&this.rate<1){const 
s=()=>{const i=Math.sqrt(this.rate/(1-this.rate));return n.mul(Rp(n.shape,1,i))};return Uh(s,()=>n,t.training||!1)}return n})}}SL.className="GaussianDropout",ge(SL);class IL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||Xe(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e),s=()=>{const i=Xe(e),o=1.6732632423543772,a=1.0507009873554805,c=-o*a;let u=tr(_o(n),this.rate);u=_h(u,"float32");const p=((1-this.rate)*(1+this.rate*c**2))**-.5,m=-p*c*this.rate,y=i.mul(u).add(u.add(-1).mul(c));return y.mul(p).add(m)};return Uh(s,()=>Xe(e),t.training||!1)}return e})}}IL.className="AlphaDropout",ge(IL);function eu(e,t,n,s,i,o=.001){let a;if(e.rank===2)a=nA(e,t,n,s,i,o);else if(e.rank===3)a=sA(e,t,n,s,i,o);else if(e.rank===4)a=iA(e,t,n,s,i,o);else throw new Ge(`batchNormalization is not implemented for array of rank ${e.rank} yet`);return a}function hG(e,t,n,s,i=.001){return ee(()=>{const o=np(e,s),a=o.mean,c=o.variance,u=eu(e,a,c,n,t,i);return[u,a,c]})}function uG(e,t,n,s,i=.001){return ee(()=>{const o=np(e,s),a=o.mean,c=o.variance,u=[];for(const I of ai(0,e.rank))s.indexOf(I)!==-1?u.push(1):u.push(e.shape[I]);const p=a.reshape(u),m=c.reshape(u),y=t==null?null:t.reshape(u),b=n==null?null:n.reshape(u),w=eu(e,p,m,b,y,i);return[w,a,c]})}function dG(e,t,n,s,i=.001){return ot(s.slice().sort(),ai(0,e.rank-1))?hG(e,t,n,s,i):uG(e,t,n,s,i)}class xL extends lt{constructor(e){e==null&&(e={}),super(e),this.supportsMasking=!0,this.axis=e.axis==null?-1:e.axis,this.momentum=e.momentum==null?.99:e.momentum,this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.movingMeanInitializer=Ft(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=Ft(e.movingVarianceInitializer||"ones"),this.betaConstraint=ln(e.betaConstraint),this.gammaConstraint=ln(e.gammaConstraint),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer)}build(e){e=It(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(n==null)throw new j(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new fn({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training,s=Xe(e),i=s.shape,o=i.length,a=ai(0,o),c=this.axis>=0?this.axis:this.axis+o;a.splice(c,1);const u=Bo(1,o);u[c]=i[c];const p=a.slice();p.sort();const m=!ot(p,ai(0,o).slice(0,o-1)),y=()=>{if(m){const N=this.movingMean.read().reshape(u),E=this.movingVariance.read().reshape(u),D=this.center?this.beta.read().reshape(u):null,F=this.scale?this.gamma.read().reshape(u):null;return eu(s,N,E,D,F,this.epsilon)}else return 
eu(s,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!n)return y();const[b,w,I]=dG(s,this.gamma.read(),this.beta.read(),a,this.epsilon),T=(N,E,D)=>{ee(()=>{const F=1-D,_=N.read(),B=_.sub(E).mul(F);N.write(_.sub(B))})},v=()=>{T(this.movingMean,w,this.momentum),T(this.movingVariance,I,this.momentum)};return v(),b})}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Vt(this.betaInitializer),gammaInitializer:Vt(this.gammaInitializer),movingMeanInitializer:Vt(this.movingMeanInitializer),movingVarianceInitializer:Vt(this.movingVarianceInitializer),betaRegularizer:xt(this.betaRegularizer),gammaRegularizer:xt(this.gammaRegularizer),betaConstraint:cn(this.betaConstraint),gammaConstraint:cn(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}xL.className="BatchNormalization",ge(xL);class TL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.axis=e.axis==null?-1:e.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer),this.supportsMasking=!0}build(e){e=It(e);const t=e.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let i=0;i=t)throw new Error(`Invalid axis: ${i}`);if(this.axis.length!==Hr(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map(i=>e[i]),s=!0;this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,s):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,s):this.beta=null,this.built=!0}call(e,t){const n=Xe(e),s=n.shape,i=s.length;return ee(()=>{const o=!0;let{mean:a,variance:c}=np(n,this.axis,o);const u=Bo(1,i);for(const I of this.axis)u[I]=s[I];const p=I=>I!=null&&I.shape.length!==i&&this.axis!==[i-1]?I.reshape(u):I;let m=p(this.gamma.read()),y=p(this.beta.read());const b=[],w=[];for(let I=0;I{if(e.rank!==3)throw new j(`temporalPadding expects input tensor to be 3-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new j(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);const n=[[0,0],t,[0,0]];return ki(e,n)})}function pG(e,t,n){return ee(()=>{if(e.rank!==4)throw new j(`temporalPadding expects input tensor to be 4-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new j("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(n==null&&(n=ri()),n!=="channelsLast"&&n!=="channelsFirst")throw new j(`Unknown data format: ${n}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let s;return n==="channelsFirst"?s=[[0,0],[0,0],t[0],t[1]]:s=[[0,0],t[0],t[1],[0,0]],ki(e,s)})}class AL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.dataFormat=e.dataFormat==null?ri():e.dataFormat,e.padding==null)this.padding=[[1,1],[1,1]];else if(typeof e.padding=="number")this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,e.padding.length!==2)throw new j(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if(typeof e.padding[0]=="number")t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,e.padding[0].length!==2)throw new j(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],e.padding[1].length!==2)throw new j(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){e=It(e);let t,n;return this.dataFormat==="channelsFirst"?(e[2]!=null&&e[2]>=0?t=e[2]+this.padding[0][0]+this.padding[0][1]:t=null,e[3]!=null&&e[3]>=0?n=e[3]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],e[1],t,n]):(e[1]!=null&&e[1]>=0?t=e[1]+this.padding[0][0]+this.padding[0][1]:t=null,e[2]!=null&&e[2]>=0?n=e[2]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],t,n,e[3]])}call(e,t){return ee(()=>pG(Xe(e),this.padding,this.dataFormat))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}AL.className="ZeroPadding2D",ge(AL);function rm(e,t,n,s,i,o){return ee(()=>{Gt(i),Sv(o),Ns(s),n==null&&(n=[1,1]),s==null&&(s="valid"),i==null&&(i=ri()),o==null&&(o="max"),e=Zw(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=mh(e,t,n,c):a=oh(e,t,n,c),i==="channelsFirst"&&(a=Pe(a,[0,3,1,2])),a})}function kN(e,t,n,s,i,o){return ee(()=>{Gt(i),Sv(o),Ns(s),n==null&&(n=[1,1,1]),s==null&&(s="valid"),i==null&&(i=ri()),o==null&&(o="max"),e=vN(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=Nb(e,t,n,c):a=pb(e,t,n,c),i==="channelsFirst"&&(a=Pe(a,[0,4,1,2,3])),a})}class FN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=2),super(e),typeof e.poolSize=="number")this.poolSize=[e.poolSize];else if(Array.isArray(e.poolSize)&&e.poolSize.length===1&&typeof e.poolSize[0]=="number")this.poolSize=e.poolSize;else throw new j(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);if(mn(this.poolSize,"poolSize"),e.strides==null)this.strides=this.poolSize;else if(typeof e.strides=="number")this.strides=[e.strides];else if(Array.isArray(e.strides)&&e.strides.length===1&&typeof e.strides[0]=="number")this.strides=e.strides;else throw new j(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,Ns(this.padding),this.inputSpec=[new fn({ndim:3})]}computeOutputShape(e){e=It(e);const t=ui(e[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return ee(()=>{this.invokeCallHook(e,t),e=Wh(Xe(e),2);const n=this.poolingFunction(Xe(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return zr(n,[2])})}getConfig(){const 
e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class vL extends FN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"max")}}vL.className="MaxPooling1D",ge(vL);class NL extends FN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"avg")}}NL.className="AveragePooling1D",ge(NL);class _N extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==2)throw new j(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];mn(this.poolSize,"poolSize"),mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ns(this.padding),this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){e=It(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2];return t=ui(t,this.poolSize[0],this.padding,this.strides[0]),n=ui(n,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class CL extends _N{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"max")}}CL.className="MaxPooling2D",ge(CL);class RL extends _N{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"avg")}}RL.className="AveragePooling2D",ge(RL);class WN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==3)throw new j(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];mn(this.poolSize,"poolSize"),mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ns(this.padding),this.inputSpec=[new fn({ndim:5})]}computeOutputShape(e){e=It(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[4]:e[3];return t=ui(t,this.poolSize[0],this.padding,this.strides[0]),n=ui(n,this.poolSize[1],this.padding,this.strides[1]),s=ui(s,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class OL extends WN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return 
Gt(i),Ns(s),kN(e,t,n,s,i,"max")}}OL.className="MaxPooling3D",ge(OL);class EL extends WN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),kN(e,t,n,s,i,"avg")}}EL.className="AveragePooling3D",ge(EL);class $N extends lt{constructor(e){super(e);this.inputSpec=[new fn({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new Ge}}class DL extends $N{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=Xe(e);return zt(n,1)})}}DL.className="GlobalAveragePooling1D",ge(DL);class kL extends $N{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=Xe(e);return Xn(n,1)})}}kL.className="GlobalMaxPooling1D",ge(kL);class UN extends lt{constructor(e){super(e);this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){return e=e,this.dataFormat==="channelsLast"?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new Ge}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class FL extends UN{call(e,t){return ee(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?zt(n,[1,2]):zt(n,[2,3])})}}FL.className="GlobalAveragePooling2D",ge(FL);class _L extends UN{call(e,t){return ee(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?Xn(n,[1,2]):Xn(n,[2,3])})}}_L.className="GlobalMaxPooling2D",ge(_L);class BN extends lt{constructor(e){super(e);this.layer=e.layer}build(e){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(e){this.layer!=null&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=t.layer,i=hi(s,n);delete t.layer;const o={layer:i};return Object.assign(o,t),new e(o)}}class WL extends BN{constructor(e){super(e);this.supportsMasking=!0}build(e){if(e=It(e),e.length<3)throw new j(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){e=It(e);const t=[e[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return ee(()=>{e=Xe(e);const n=(o,a)=>{const c=Xe(this.layer.call(o,t));return[c,[]]},s=EN(n,e,[],!1,null,null,!1,!0),i=s[1];return i})}}WL.className="TimeDistributed",ge(WL);function mG(e){Za(xz,"BidirectionalMergeMode",e)}const fG="concat";class $L extends BN{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=hi(n),t.goBackwards=!(t.goBackwards===!0);const s={};if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=hi(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=e.mergeMode===void 0?fG:e.mergeMode,mG(this.mergeMode),e.weights)throw new Ge("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,this.forwardLayer!=null&&(this.forwardLayer.trainable=e),this.backwardLayer!=null&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t=this.forwardLayer.computeOutputShape(e);Array.isArray(t)&&Array.isArray(t[0])||(t=[t]),t=t;let n,s,i;return this.returnState&&(i=t.slice(1)),n=t[0],n=n,this.mergeMode==="concat"?(n[n.length-1]*=2,s=[n]):this.mergeMode==null?s=[n,n.slice()]:s=[n],this.returnState?this.mergeMode==null?s.concat(i).concat(i.slice()):[n].concat(i).concat(i.slice()):Jn(s)}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=ON(e,n,s,this.numConstants);if(e=i.inputs,n=i.initialState,s=i.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(n==null||n.length===0)&&s==null)return super.apply(e,t);const o=[],a=[];if(n!=null){const u=n.length;if(u%2>0)throw new j("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,o.push(...n);const p=n.map(m=>new fn({shape:m.shape}));this.forwardLayer.stateSpec=p.slice(0,u/2),this.backwardLayer.stateSpec=p.slice(u/2),a.push(...p)}if(s!=null)throw new Ge("Support for constants in Bidirectional layers is not implemented yet.");const c=o[0]instanceof li;for(const u of o)if(u instanceof li!==c)throw new j("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(c){const u=[e].concat(o),p=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=p;const y=super.apply(u,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t.initialState;let s,i;if(n==null)s=this.forwardLayer.call(e,t),i=this.backwardLayer.call(e,t);else{const c=n.slice(0,n.length/2),u=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:c})),i=this.backwardLayer.call(e,Object.assign(t,{initialState:u}))}let o;this.returnState&&(Array.isArray(s)&&(o=s.slice(1).concat(i.slice(1))),s=s[0],i=i[0]),this.returnSequences&&(i=As(i,1));let a;return this.mergeMode==="concat"?a=yw([s,i]):this.mergeMode==="sum"?a=be(s,i):this.mergeMode==="ave"?a=X(.5,be(s,i)):this.mergeMode==="mul"?a=X(s,i):this.mergeMode==null&&(a=[s,i]),this.returnState?this.mergeMode==null?a.concat(o):[a].concat(o):a})}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){Po(this.forwardLayer.name,()=>{this.forwardLayer.build(e)}),Po(this.backwardLayer.name,()=>{this.backwardLayer.build(e)}),this.built=!0}computeMask(e,t){Array.isArray(t)&&(t=t[0]);let n;if(this.returnSequences?this.mergeMode==null?n=[t,t]:n=t:this.mergeMode==null?n=[null,null]:n=null,this.returnState){const s=this.forwardLayer.states,i=s.map(o=>null);return Array.isArray(n)?n.concat(i).concat(i):[n].concat(i).concat(i)}else return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(e),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=hi(t.layer);if(delete t.layer,t.numConstants!=null)throw new Ge("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new e(s)}}$L.className="Bidirectional",ge($L);function gG(e){return new tc(e)}function yG(e){return new Kw(e)}function bG(e){return new Yw(e)}function wG(e){return new qw(e)}function LG(e){return new jw(e)}function SG(e){return new Jw(e)}function IG(e){return new Xw(e)}function xG(e){return new Qp(e)}function TG(e){return new Hh(e)}function AG(e){return new tL(e)}function vG(e){return new Zp(e)}function NG(e){return new nL(e)}function CG(e){return new sL(e)}function RG(e){return new iL(e)}function OG(e){return new rL(e)}function EG(e){return new pL(e)}function DG(e){return new uL(e)}function kG(e){return new im(e)}function FG(e){return new hL(e)}function _G(e){return new dL(e)}function WG(e){return new mL(e)}function $G(e){return new fL(e)}function UG(e){return new gL(e)}function BG(e){return new bL(e)}function MG(e){return new qh(e)}function PG(e){return new Kh(e)}function zG(e){return new Zh(e)}function GG(e){return new Xh(e)}function VG(e){return new Jh(e)}function HG(e){return new jh(e)}function YG(e){return new wL(e)}function qG(e){return new xL(e)}function jG(e){return new TL(e)}function KG(e){return new AL(e)}function UL(e){return new NL(e)}function XG(e){return UL(e)}function JG(e){return UL(e)}function BL(e){return new RL(e)}function ZG(e){return BL(e)}function QG(e){return BL(e)}function ML(e){return new EL(e)}function eV(e){return ML(e)}function tV(e){return ML(e)}function nV(e){return new DL(e)}function sV(e){return new FL(e)}function MN(e){return new kL(e)}function PN(e){return new _L(e)}function zN(e){return new vL(e)}function GN(e){return new CL(e)}function iV(e){return new OL(e)}function rV(e){return new aL(e)}function oV(e){return new tm(e)}function aV(e){return new cL(e)}function cV(e){return new Yh(e)}function lV(e){return new oL(e)}function hV(e){return new em(e)}function uV(e){return new lL(e)}function dV(e){return new sm(e)}function pV(e){return new Bi(e)}function mV(e){return new nm(e)}function fV(e){return new $L(e)}function gV(e){return new WL(e)}const yV=MN,bV=PN,wV=zN,LV=GN;function SV(e){return new LL(e)}function IV(e){return new SL(e)}function xV(e){return new IL(e)}function TV(e){return new yL(e)}var 
AV=Object.freeze({__proto__:null,inputLayer:gG,elu:yG,reLU:bG,leakyReLU:wG,prelu:LG,softmax:SG,thresholdedReLU:IG,conv1d:xG,conv2d:TG,conv2dTranspose:AG,conv3d:vG,separableConv2d:NG,cropping2D:CG,upSampling2d:RG,depthwiseConv2d:OG,activation:EG,dense:DG,dropout:kG,spatialDropout1d:FG,flatten:_G,repeatVector:WG,reshape:$G,permute:UG,embedding:BG,add:MG,average:PG,concatenate:zG,maximum:GG,minimum:VG,multiply:HG,dot:YG,batchNormalization:qG,layerNormalization:jG,zeroPadding2d:KG,averagePooling1d:UL,avgPool1d:XG,avgPooling1d:JG,averagePooling2d:BL,avgPool2d:ZG,avgPooling2d:QG,averagePooling3d:ML,avgPool3d:eV,avgPooling3d:tV,globalAveragePooling1d:nV,globalAveragePooling2d:sV,globalMaxPooling1d:MN,globalMaxPooling2d:PN,maxPooling1d:zN,maxPooling2d:GN,maxPooling3d:iV,gru:rV,gruCell:oV,lstm:aV,lstmCell:cV,simpleRNN:lV,simpleRNNCell:hV,convLstm2d:uV,convLstm2dCell:dV,rnn:pV,stackedRNNCells:mV,bidirectional:fV,timeDistributed:gV,globalMaxPool1d:yV,globalMaxPool2d:bV,maxPool1d:wV,maxPool2d:LV,Layer:lt,RNN:Bi,RNNCell:oc,input:hN,gaussianNoise:SV,gaussianDropout:IV,alphaDropout:xV,masking:TV});function vV(e,t){return kw(e,t)}function NV(e,t){return Hv(e,t)}function CV(e,t){return Yv(e,t)}function RV(e,t){return Fw(e,t)}function OV(e,t){return _w(e,t)}function EV(e,t){return Vv(e,t)}function DV(e,t){return b3(e,t)}function kV(e,t){return Hp(e,t)}function FV(e,t){return sc(e,t)}function _V(e,t){return Kr(e,t)}function WV(e,t){return Kr(e,t)}function $V(e,t){return Kr(e,t)}function UV(e,t){return ar(e,t)}function BV(e,t){return ar(e,t)}function MV(e,t){return ar(e,t)}var PV=Object.freeze({__proto__:null,binaryAccuracy:vV,binaryCrossentropy:NV,sparseCategoricalAccuracy:CV,categoricalAccuracy:RV,categoricalCrossentropy:OV,precision:EV,recall:DV,cosineProximity:kV,meanAbsoluteError:FV,meanAbsolutePercentageError:_V,MAPE:WV,mape:$V,meanSquaredError:UV,MSE:BV,mse:MV});var zV=Object.freeze({__proto__:null,modelFromJSON:J3});function GV(e){return new Gh(e)}function VV(e){return rG(e)}function HV(e){return oG(e)}var YV=Object.freeze({__proto__:null,l1l2:GV,l1:VV,l2:HV});class VN extends nc{constructor(){super(...arguments);this.model=null}setModel(e){if(!(e instanceof cr))throw new Error("model must be a LayersModel, not some other Container");this.model=e}}function om(e,t){return et}class YN extends VN{constructor(e){super();if(e==null&&(e={}),e.restoreBestWeights)throw new Ge("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=e.monitor||"val_loss",this.minDelta=Math.abs(e.minDelta||0),this.patience=e.patience||0,this.verbose=e.verbose||0,this.mode=e.mode||"auto",this.baseline=e.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=om:this.mode==="max"?this.monitorFunc=HN:this.monitor.indexOf("acc")!==-1?this.monitorFunc=HN:this.monitorFunc=om,this.monitorFunc===om&&(this.minDelta*=-1)}async onTrainBegin(e){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===om?Infinity:-Infinity}async onEpochEnd(e,t){await jr(t);const n=this.getMonitorValue(t);if(n==null)return;this.monitorFunc(n-this.minDelta,this.best)?(this.best=n,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=e,this.model.stopTraining=!0))}async onTrainEnd(e){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(e){e==null&&(e={});const t=e[this.monitor];return t==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(e)}`),t}}function qV(e){return new YN(e)}const jV={earlyStopping:qV};var di;(function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(di||(di={}));var qN;(function(e){let t;(function(n){n[n.LEGACY=0]="LEGACY",n[n.V1=1]="V1",n[n.V2=2]="V2"})(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))})(qN||(qN={}));const PL={};function KV(e,t){const n={tfOpName:e,category:"custom",inputs:[],attrs:[],customExecutor:t};PL[e]=n}function jN(e){return PL[e]}function XV(e){delete PL[e]}function R(e,t,n,s){const i=t.inputParams[e];if(i&&i.inputIndexStart!==void 0){const a=i.inputIndexStart,c=i.inputIndexEnd===0?void 0:i.inputIndexEnd===void 0?a+1:i.inputIndexEnd;if(i.type==="tensor")return Qn(t.inputNames[i.inputIndexStart],n,s);if(i.type==="tensors"){const m=t.inputNames.slice(a,c);return m.map(y=>Qn(y,n,s))}const u=Qn(t.inputNames.slice(a)[0],n,s),p=u.dataSync();return i.type==="number"?p[0]:Ls(u.shape,p)}const o=t.attrParams[e];return o&&o.value}function Qn(e,t,n){const[s,i]=ls(e),o=n.currentContextIds.find(a=>!!t[am(s,a)]);return o!==void 0?t[am(s,o)][i]:void 0}function JV(e,t,n){return t[am(e,n.currentContextId)]}function lr(e,t){const[n,s]=ls(e);return[am(n,t&&t.currentContextId),s]}function am(e,t){return t?`${e}-${t}`:e}function ls(e){const t=e.split(":");if(t.length===1)return[e,0];const n=t[0];return[n,Number(t[t.length-1])]}function $ee(e,t){const n=[];for(let s=0;sn.json));this.opMappers=t.reduce((n,s)=>(n[s.tfOpName]=s,n),{})}transformGraph(e,t={}){const 
n=e.node,s=[],i=[],o=[],a=n.reduce((I,T)=>(I[T.name]=this.mapNode(T),T.op.startsWith("Placeholder")?s.push(I[T.name]):T.op==="Const"?i.push(I[T.name]):(T.input==null||T.input.length===0)&&o.push(I[T.name]),I),{});let c=[];const u=[];let p={},m={};t!=null&&(p=this.mapSignatureEntries(t.inputs),m=this.mapSignatureEntries(t.outputs));const y=Object.keys(a);y.forEach(I=>{const T=a[I];T.inputNames.forEach(v=>{const[N]=lr(v);T.inputs.push(a[N]),a[N].children.push(T)})}),Object.keys(m).length===0?y.forEach(I=>{const T=a[I];T.children.length===0&&u.push(T)}):Object.keys(m).forEach(I=>{const[T]=lr(I),v=a[T];v!=null&&(v.signatureKey=m[I],u.push(v))}),Object.keys(p).length>0?Object.keys(p).forEach(I=>{const[T]=lr(I),v=a[T];v&&(v.signatureKey=p[I],c.push(v))}):c=s;let b={};e.library!=null&&e.library.function!=null&&(b=e.library.function.reduce((I,T)=>(I[T.signature.name]=this.mapFunction(T),I),{}));const w={nodes:a,inputs:c,outputs:u,weights:i,placeholders:s,signature:t,functions:b};return o.length>0&&(w.initNodes=o),w}mapSignatureEntries(e){return Object.keys(e||{}).reduce((t,n)=>(t[e[n].name]=n,t),{})}mapNode(e){const t=jN(e.op)||this.opMappers[e.op]||{};e.attr==null&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map(s=>s.startsWith("^")?s.substr(1):s),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return t.inputs!=null&&(n.inputParams=t.inputs.reduce((s,i)=>(s[i.name]={type:i.type,inputIndexStart:i.start,inputIndexEnd:i.end},s),{})),t.attrs!=null&&(n.attrParams=t.attrs.reduce((s,i)=>{const o=i.type;let a;switch(i.type){case"string":a=zL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=zL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"string[]":a=XL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=XL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number":a=VL(e.attr,i.tfName,i.defaultValue||0),a===void 0&&!!i.tfDeprecatedName&&(a=VL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number[]":a=KL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=KL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool":a=GL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=GL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool[]":a=ZL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=ZL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape":a=jL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=jL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape[]":a=JL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype":a=YL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=YL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype[]":a=qL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=qL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"func":a=JN(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JN(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${i.type} for op: ${e.op}`)}return s[i.name]={value:a,type:o},s},{})),n}mapFunction(e){const t=e.nodeDef,n=[],s=[];let i={};t!=null&&(i=t.reduce((m,y)=>(m[y.name]=this.mapNode(y),y.op==="Const"&&s.push(m[y.name]),m),{}));const 
o=[],a=[];e.signature.inputArg.forEach(m=>{const[y]=lr(m.name),b={name:y,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:HL(m.type),type:"dtype"}},children:[]};b.signatureKey=m.name,o.push(b),i[y]=b});const c=Object.keys(i);c.forEach(m=>{const y=i[m];y.inputNames.forEach(b=>{const[w]=lr(b);y.inputs.push(i[w]),i[w].children.push(y)})});const u=e.ret;e.signature.outputArg.forEach(m=>{const[y,b]=lr(u[m.name]),w=i[y];w!=null&&(w.defaultOutput=b,a.push(w))});const p=this.mapArgsToSignature(e);return{nodes:i,inputs:o,outputs:a,weights:s,placeholders:n,signature:p}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n),t),{}),outputs:e.signature.outputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t),{})}}mapArgToTensorInfo(e,t){let n=e.name;return t!=null&&(n=t[n]),{name:n,dtype:e.type}}}function OH(e){const t=C().global;if(typeof t.atob!="undefined")return t.atob(e);if(typeof Buffer!="undefined")return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function XN(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):OH(e);return t?n:n.toLowerCase()}function zL(e,t,n,s=!1){const i=e[t];return i!=null?XN(i.s,s):n}function GL(e,t,n){const s=e[t];return s?s.b:n}function VL(e,t,n){const s=e[t]||{},i=s.i!=null?s.i:s.f!=null?s.f:n;return typeof i=="number"?i:parseInt(i,10)}function HL(e){typeof e=="string"&&(e=di[e]);switch(e){case di.DT_FLOAT:return"float32";case di.DT_INT32:case di.DT_INT64:case di.DT_INT8:case di.DT_UINT8:return"int32";case di.DT_BOOL:return"bool";case di.DT_DOUBLE:return"float32";case di.DT_STRING:return"string";default:return null}}function JN(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function YL(e,t,n){const s=e[t];return s&&s.type?HL(s.type):n}function qL(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map(i=>HL(i)):n}function ZN(e){return e.unknownRank?void 0:e.dim!=null?e.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function jL(e,t,n){const s=e[t];return s&&s.shape?ZN(s.shape):n}function KL(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map(i=>typeof i=="number"?i:parseInt(i,10)):n}function XL(e,t,n,s=!1){const i=e[t];return i&&i.list&&i.list.s?i.list.s.map(o=>XN(o,s)):n}function JL(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map(i=>ZN(i)):n}function ZL(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class EH{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map(s=>this.getInput(s)),e.rawAttrs!=null&&(this.attrs=Object.keys(e.rawAttrs).reduce((s,i)=>(s[i]=this.getAttr(i),s),{}))}getInput(e){return Qn(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(n.tensor!=null)return Qn(e,this.tensorMap,this.context);if(n.i!=null||n.f!=null)return VL(this.node.rawAttrs,e,t);if(n.s!=null)return zL(this.node.rawAttrs,e,t);if(n.b!=null)return GL(this.node.rawAttrs,e,t);if(n.shape!=null)return jL(this.node.rawAttrs,e,t);if(n.type!=null)return YL(this.node.rawAttrs,e,t);if(n.list!=null){if(n.list.i!=null||n.list.f!=null)return KL(this.node.rawAttrs,e,t);if(n.list.s!=null)return XL(this.node.rawAttrs,e,t);if(n.list.shape!=null)return JL(this.node.rawAttrs,e,t);if(n.list.b!=null)return ZL(this.node.rawAttrs,e,t);if(n.list.type!=null)return 
qL(this.node.rawAttrs,e,t)}return t}}const DH=(e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[be(R("a",e,t,n),R("b",e,t,n))];case"AddN":return[eA(R("tensors",e,t,n))];case"FloorMod":case"Mod":return[tp(R("a",e,t,n),R("b",e,t,n))];case"Mul":return[X(R("a",e,t,n),R("b",e,t,n))];case"RealDiv":case"Div":return[_e(R("a",e,t,n),R("b",e,t,n))];case"DivNoNan":return[Lb(R("a",e,t,n),R("b",e,t,n))];case"FloorDiv":return[Md(R("a",e,t,n),R("b",e,t,n))];case"Sub":return[Ce(R("a",e,t,n),R("b",e,t,n))];case"Minimum":return[Eo(R("a",e,t,n),R("b",e,t,n))];case"Maximum":return[Us(R("a",e,t,n),R("b",e,t,n))];case"Pow":return[ii(R("a",e,t,n),R("b",e,t,n))];case"SquaredDifference":return[Sh(R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Uee="arithmetic";const kH=(e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[sn(R("x",e,t,n))];case"Acos":return[nb(R("x",e,t,n))];case"Acosh":return[sb(R("x",e,t,n))];case"Asin":return[ob(R("x",e,t,n))];case"Asinh":return[ab(R("x",e,t,n))];case"Atan":return[cb(R("x",e,t,n))];case"Atan2":return[lb(R("x",e,t,n),R("y",e,t,n))];case"Atanh":return[hb(R("x",e,t,n))];case"Ceil":return[fb(R("x",e,t,n))];case"Complex":return[Ci(R("real",e,t,n),R("imag",e,t,n))];case"Cos":return[lh(R("x",e,t,n))];case"Cosh":return[qd(R("x",e,t,n))];case"Elu":return[Oo(R("x",e,t,n))];case"Erf":return[Sb(R("x",e,t,n))];case"Exp":return[xs(R("x",e,t,n))];case"Expm1":return[Ib(R("x",e,t,n))];case"Floor":return[Ba(R("x",e,t,n))];case"Log":return[is(R("x",e,t,n))];case"Log1p":return[Jd(R("x",e,t,n))];case"Imag":return[Pa(R("x",e,t,n))];case"Neg":return[Pt(R("x",e,t,n))];case"Reciprocal":return[Eb(R("x",e,t,n))];case"Real":return[Do(R("x",e,t,n))];case"Relu":return[Fi(R("x",e,t,n))];case"Round":return[kb(R("x",e,t,n))];case"Selu":return[rp(R("x",e,t,n))];case"Sigmoid":return[Ei(R("x",e,t,n))];case"Sin":return[op(R("x",e,t,n))];case"Sign":return[_b(R("x",e,t,n))];case"Sinh":return[ap(R("x",e,t,n))];case"Softplus":return[za(R("x",e,t,n))];case"Sqrt":return[Sn(R("x",e,t,n))];case"Square":return[Lt(R("x",e,t,n))];case"Tanh":return[Ua(R("x",e,t,n))];case"Tan":return[Ub(R("x",e,t,n))];case"Relu6":case"ClipByValue":return[jn(R("x",e,t,n),R("clipValueMin",e,t,n),R("clipValueMax",e,t,n))];case"Rsqrt":return[ip(Qn(e.inputNames[0],t,n))];case"Prod":return[sp(R("x",e,t,n),R("axes",e,t,n))];case"LeakyRelu":return[Xd(R("x",e,t,n),R("alpha",e,t,n))];case"Prelu":return[gh(R("x",e,t,n),R("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Bee="basic_math";function Gs(e,t,n=""){k(FH(e,t),()=>n+` Shapes ${e} and ${t} must match`)}function FH(e,t){if(e.length!==t.length)return!1;for(let n=0;n{(e==null||!e.has(t.tensor.id))&&t.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map(t=>this.read(t))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is 
not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, +2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(u!=null){const p={};for(const w of Object.keys(Ms))p[w]=Ms[w];for(const w of Object.keys(n))p[w]=n[w];const m=o.config;m.customObjects=p;const y=Object.assign({},Ms);for(const w of Object.keys(n))Ms[w]=n[w];lw(o.config);const b=u(c,o.config,n,i);return Ms=Object.assign({},y),b}else{const p=Object.assign({},Ms);for(const y of Object.keys(n))Ms[y]=n[y];const m=new c(o.config);return Ms=Object.assign({},p),m}}}function dz(e,t){return et?1:0}function vp(e,t){return-1*dz(e,t)}function BQ(e){switch(e){case"float32":return"float32";default:throw new j(`Invalid dtype: ${e}`)}}function MQ(e,t){if(e==null||t==null)return e===t;if(e.length!==t.length)return!1;for(let n=0;n=0),vs(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every(i=>typeof i===t)}function mn(e,t){Array.isArray(e)?(k(e.length>0,()=>`${t} is unexpectedly an empty array.`),e.forEach((n,s)=>mn(n,`element ${s+1} of ${t}`))):k(Number.isInteger(e)&&e>0,()=>`Expected ${t} to be a positive integer, but got ${yv(e)}.`)}function yv(e){return e===null?"null":Array.isArray(e)?"["+e.map(t=>yv(t)).join(",")+"]":typeof e=="string"?`"${e}"`:`${e}`}function mz(e,t){let n=qn(),s;const i=(...o)=>{const a=qn();return a-n0,"arrayOfValues is empty");for(const t of e)vs(Array.isArray(t),"one of the values is not an array"),vs(t.length>0,"one of the values is empty");return e.reduce((t,n)=>t.length===0?n.map(s=>[s]):n.map(s=>t.map(i=>[...i,s])).reduce((s,i)=>s.concat(i),[]),[])}function uw(e,t){return ee(()=>Sn(Ue(X(e,e),t,!0)))}class kh extends No{getConfig(){return{}}}class dw extends kh{constructor(e){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=uw(e,this.axis),n=jn(t,0,this.maxValue);return X(e,_e(n,be(cn(),t)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}dw.className="MaxNorm",ge(dw);class pw extends kh{constructor(e){super();this.defaultAxis=0,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>_e(e,be(cn(),uw(e,this.axis))))}getConfig(){return{axis:this.axis}}}pw.className="UnitNorm",ge(pw);class mw extends kh{apply(e){return Fi(e)}}mw.className="NonNeg",ge(mw);class fw extends kh{constructor(e){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=e.minValue!=null?e.minValue:this.defaultMinValue,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.rate=e.rate!=null?e.rate:this.defaultRate,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=uw(e,this.axis),n=be(X(this.rate,jn(t,this.minValue,this.maxValue)),X(1-this.rate,t));return X(e,_e(n,be(cn(),t)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}fw.className="MinMaxNorm",ge(fw);const wv={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function ln(e){return cw(e)}function Lv(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"constraint")}function hn(e){if(e==null)return null;if(typeof e=="string"){const t=e in wv?wv[e]:e,n={className:t,config:{}};return Lv(n)}else return e instanceof kh?e:Lv(e)}function fz(e){return new dw(e)}function 
gz(e){return new pw(e)}function yz(){return new mw}function bz(e){return new fw(e)}var wz=Object.freeze({__proto__:null,maxNorm:fz,unitNorm:gz,nonNeg:yz,minMaxNorm:bz});const Lz=["channelsFirst","channelsLast"],Sz=["valid","same","causal"],Iz=["max","avg"],xz=["sum","mul","concat","ave"],zQ=["temporal"];const tc=new Map;function Gt(e){ec(Lz,"DataFormat",e)}function Ns(e){ec(Sz,"PaddingMode",e)}function Sv(e){ec(Iz,"PoolMode",e)}const Fh=[],Iv="/";function Go(e,t){Fh.push(e);try{const n=t();return Fh.pop(),n}catch(n){throw Fh.pop(),n}}function Tz(){return Fh.length===0?"":Fh.join(Iv)+Iv}function xv(e){if(!Av(e))throw new Error("Not a valid tensor name: '"+e+"'");return Tz()+e}function Tv(e){if(!Av(e))throw new Error("Not a valid tensor name: '"+e+"'");tc.has(e)||tc.set(e,0);const t=tc.get(e);if(tc.set(e,tc.get(e)+1),t>0){const n=`${e}_${t}`;return tc.set(n,1),n}else return e}const Az=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function Av(e){return!!e.match(Az)}function vz(e){return e===parseInt(e.toString(),10)}function Yr(e,t,n){t==null&&(t=0),n==null&&(n=e.length);let s=1;for(let i=t;ii-o),n=Math.floor((t.length-1)/2),s=Math.ceil((t.length-1)/2);return n===s?t[n]:(t[n]+t[s])/2}function ai(e,t){if(t0?t.reduce((n,s)=>n*s):1}function _h(e,t){return e.asType(t)}function Wh(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function Rz(e,t){return ee(()=>{if(e.shape.length!==2)throw new j(`repeat() expects a rank-2 tensor, but received a rank-${e.shape.length} tensor.`);const n=Wh(e,1);return bw(n,[1,t,1])})}function Oz(e){const t=[Yr(e.shape)];return e.reshape(t)}function Ez(e){if(e.rank<=1)throw new j(`batchFlatten requires a minimum rank of 2. Got rank: ${e.rank}.`);const t=[e.shape[0],Yr(e.shape,1)];return e.reshape(t)}function Vo(e,t,n){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:return Wb(e,[t,0],[n,e.shape[1]]);case 3:return lp(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return bh(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return st(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return st(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new j(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}})}function gw(e,t,n){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:return Wb(e,[0,t],[e.shape[0],n]);case 3:return lp(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return bh(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Cp(e,t,n,s){return ee(()=>{switch(e.rank){case 1:return cp(e,t,n);case 2:switch(s){case 1:return Vo(e,t,n);case 2:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return Vo(e,t,n);case 2:return lp(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return Vo(e,t,n);case 2:return bh(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return bh(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return gw(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function yw(e,t=-1){let n;return t<0&&(n=e[0].rank,n!==0?t=n:t=0),t===e[0].rank&&(t=-1),Mt(e,t)}function Nv(e,t){switch(e.rank){case 1:return rA([e,t]);case 
2:return oA([e,t],0);case 3:return aA([e,t],0);case 4:return cA([e,t],0);default:throw new j(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function bw(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new j(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return Br(e,t)}function Rp(e,t=0,n=1,s,i){return Ob(e,t,n,s,i)}function Wi(e,t,n,s){if(e.rank<2||t.rank<2)throw new Ge(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3){const i=e.shape.slice(-1)[0],o=t.shape.slice(-2)[0];if(i!==o)throw new Ge(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${t.shape}`)}if(e.rank===2&&t.rank===2){const i=!1,o=!1;return bp({a:e,b:t,transposeA:i,transposeB:o,bias:s?ww(e.rank,s,ri()):null,activation:n})}else{const i=e.shape.slice(),o=i.pop();e=e.reshape([-1,o]);const a=t.shape.slice(),c=a.pop(),u=a.pop(),p=[...a,c],m=Array.from({length:t.rank},(I,T)=>T===0?t.rank-2:T<=t.rank-2?T-1:T);t=t.transpose(m).reshape([u,-1]);const y=[...i,...p],b=!1,w=!1;return bp({a:e,b:t,transposeA:b,transposeB:w,bias:s?ww(e.rank,s,ri()):null,activation:n}).reshape(y)}}function KQ(e){return ee(()=>{const t=et(e),n=Dn(e);return $n(ni(e,t),t,$n(Ts(e,et(e)),n,X(-1,n)))})}function XQ(e,t){return ee(()=>{if(e.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return e=e.toInt(),vo(e,t).toFloat()})}function Cv(e,t,n){return ee(()=>(Array.isArray(t)?t=rs(t,"int32"):t=t.toInt(),za(e,t,n)))}function $h(e){return X(e,e)}function JQ(e,t){return ee(()=>{if(typeof t=="number"&&(t=Ne(Math.round(t),"int32")),t.dtype!=="int32")throw new Ge(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return ii(e,t)})}function ww(e,t,n){const s=t.shape;if(t.rank!==1&&t.rank!==e)throw new j(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(e===5){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===4){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===3){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new j(`Unsupported input rank by biasAdd: ${t.rank}`)}function $i(e,t,n){return ee(()=>(n==null&&(n=ri()),Gt(n),e.add(ww(e.rank,t,n))))}function Dz(e,t=1){if(t!==1)throw new Ge(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return Do(e)}function kz(e){return ee(()=>_e(e,rn(e).add(1)))}function Rv(e,t,n,s){return ee(()=>zA(e,t,n,s))}function Fz(e){return ee(()=>{const t=be(.5,X(.2,e));return jn(t,0,1)})}function Uh(e,t,n=!1){return n?e():t()}const _z=["fanIn","fanOut","fanAvg"],Wz=["normal","uniform","truncatedNormal"],ZQ=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function $z(e){ec(_z,"FanMode",e)}function Uz(e){ec(Wz,"Distribution",e)}class Ps extends No{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class Lw extends Ps{apply(e,t){return ct(e,t)}}Lw.className="Zeros",ge(Lw);class Op extends 
Ps{apply(e,t){return si(e,t)}}Op.className="Ones",ge(Op);class Sw extends Ps{constructor(e){super();if(typeof e!="object")throw new j(`Expected argument of type ConstantConfig but got ${e}`);if(e.value===void 0)throw new j(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return ee(()=>X(Ne(this.value),si(e,t)))}getConfig(){return{value:this.value}}}Sw.className="Constant",ge(Sw);class Iw extends Ps{constructor(e){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return $o(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}Iw.className="RandomUniform",ge(Iw);class xw extends Ps{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`randomNormal does not support dType ${t}.`);return Rp(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}xw.className="RandomNormal",ge(xw);class Tw extends Ps{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`truncatedNormal does not support dType ${t}.`);return Ih(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Tw.className="TruncatedNormal",ge(Tw);class Aw extends Ps{constructor(e){super();this.gain=e.gain!=null?e.gain:1}apply(e,t){return ee(()=>{if(e.length!==2||e[0]!==e[1])throw new j("Identity matrix initializer can only be used for 2D square matrices.");return X(this.gain,Kd(e[0]))})}getConfig(){return{gain:this.gain}}}Aw.className="Identity",ge(Aw);function Bz(e,t="channelsLast"){let n,s;if(Gt(t),e.length===2)n=e[0],s=e[1];else if([3,4,5].indexOf(e.length)!==-1){if(t==="channelsFirst"){const i=Yr(e,2);n=e[1]*i,s=e[0]*i}else if(t==="channelsLast"){const i=Yr(e,0,e.length-2);n=e[e.length-2]*i,s=e[e.length-1]*i}}else{const i=Yr(e);n=Math.sqrt(i),s=Math.sqrt(i)}return[n,s]}class Zn extends Ps{constructor(e){super();if(e.scale<0)throw new j(`scale must be a positive float. 
Got: ${e.scale}`);this.scale=e.scale==null?1:e.scale,this.mode=e.mode==null?"fanIn":e.mode,$z(this.mode),this.distribution=e.distribution==null?"normal":e.distribution,Uz(this.distribution),this.seed=e.seed}apply(e,t){const n=Bz(e),s=n[0],i=n[1];let o=this.scale;if(this.mode==="fanIn"?o/=Math.max(1,s):this.mode==="fanOut"?o/=Math.max(1,i):o/=Math.max(1,(s+i)/2),this.distribution==="normal"){const a=Math.sqrt(o);if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Ge(`${this.getClassName()} does not support dType ${t}.`);return Ih(e,0,a,t,this.seed)}else{const a=Math.sqrt(3*o);return $o(e,-a,a,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}Zn.className="VarianceScaling",ge(Zn);class Ep extends Zn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Ep.className="GlorotUniform",ge(Ep);class Dp extends Zn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Dp.className="GlorotNormal",ge(Dp);class kp extends Zn{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}kp.className="HeNormal",ge(kp);class Fp extends Zn{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Fp.className="HeUniform",ge(Fp);class _p extends Zn{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}_p.className="LeCunNormal",ge(_p);class Wp extends Zn{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Zn.className}}Wp.className="LeCunNormal",ge(Wp);class vw extends Ps{constructor(e){super();if(this.DEFAULT_GAIN=1,this.gain=e.gain==null?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,this.seed!=null)throw new Ge("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return ee(()=>{if(e.length<2)throw new Ge("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const n=e[0]>e[1]?[e[1],e[0]]:e,s=Rp(n,0,1,"float32");let i=QA.gramSchmidt(s);return e[0]>e[1]&&(i=i.transpose()),X(this.gain,i)})}getConfig(){return{gain:this.gain,seed:this.seed}}}vw.className="Orthogonal",ge(vw);const Ov={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function Ev(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"initializer")}function Vt(e){return cw(e)}function Ft(e){if(typeof e=="string"){const t=e in Ov?Ov[e]:e;if(t==="GlorotNormal")return new Dp;if(t==="GlorotUniform")return new Ep;if(t==="HeNormal")return new kp;if(t==="HeUniform")return new Fp;if(t==="LeCunNormal")return new _p;if(t==="LeCunUniform")return new Wp;{const n={};return n.className=t,n.config={},Ev(n)}}else return e instanceof Ps?e:Ev(e)}function Mz(){return new Lw}function Pz(){return new Op}function zz(e){return new Sw(e)}function Gz(e){return new Iw(e)}function Vz(e){return new xw(e)}function Hz(e){return new Tw(e)}function 
Yz(e){return new Aw(e)}function qz(e){return new Zn(e)}function jz(e){return new Ep(e)}function Kz(e){return new Dp(e)}function Xz(e){return new kp(e)}function Jz(e){return new Fp(e)}function Zz(e){return new _p(e)}function Qz(e){return new Wp(e)}function e3(e){return new vw(e)}var t3=Object.freeze({__proto__:null,zeros:Mz,ones:Pz,constant:zz,randomUniform:Gz,randomNormal:Vz,truncatedNormal:Hz,identity:Yz,varianceScaling:qz,glorotUniform:jz,glorotNormal:Kz,heNormal:Xz,heUniform:Jz,leCunNormal:Zz,leCunUniform:Qz,orthogonal:e3});let n3=0;function Dv(){return n3++}const $p={};function Up(e=""){return e in $p||($p[e]=0),$p[e]+=1,e+$p[e].toString()}function Nw(e){return Array.isArray(e)&&Array.isArray(e[0])}function Bp(e){return e.length===0?[]:Array.isArray(e[0])?e:[e]}function Xe(e){let t;if(Array.isArray(e)){if(e.length!==1)throw new j(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function It(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(e.length===1)return e=e,e[0];throw new j(`Expected exactly 1 Shape; got ${e.length}`)}else return e}function Mp(e){let t=0;for(const n of e)n.shape.length===0?t+=1:t+=n.shape.reduce((s,i)=>s*i);return t}const kv="Variable";class ci{constructor(e,t="float32",n=kv,s=!0,i=null){this.dtype=t==null?"float32":t,this.shape=e.shape,this.id=Dv(),n=n==null?kv:n,this.originalName=xv(n),this.name=Tv(this.originalName),this.trainable_=s,this.constraint=i,this.val=xA(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),s3(this.val,e),this.val.id!==e.id&&(this.val.assign(e),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function s3(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}function QQ(e,t,n,s){return new ci(e,t,n,!0,s)}function eee(e,t,n){return new ci(ct(e),t,n)}function tee(e,t,n){return new ci(et(e),t,n)}function nee(e,t,n){const s=si(e);return new ci(s,t,n)}function see(e,t,n){const s=Dn(e);return new ci(s,t,n)}function iee(e,t,n){return new ci(Kd(e),t,n)}function ree(e,t,n,s,i,o="randomUniform"){return new ci($o(e,t,n,s),s,o)}function oee(e,t=0,n=1,s,i,o="truncatedNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Ge(`randomNormal does not support dType ${s}.`);return new ci(Ih(e,t,n,s,i),s,o)}function aee(e,t=0,n=1,s,i,o="randomNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Ge(`randomNormalVariable does not support dType ${s}.`);return new ci(Ob(e,t,n,s,i),s,o)}function cee(e,t){return e.write(t)}function lee(e,t){return e.write(be(e.read(),t))}function hee(e,t){return e.write(Ce(e.read(),t))}function Cw(e){return e.map(t=>t.read())}function Rw(e){e.forEach(t=>{const n=t[0];n.write(t[1])})}function uee(e,t){const n=t.map(i=>i.read()),s=Ab(e,n);return t.map(i=>s.grads[i.name])}class fn{constructor(e){this.dtype=e.dtype,this.shape=e.shape,e.shape!=null?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class li{constructor(e,t,n,s,i,o,a){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=i,this.outputTensorIndex=a,this.id=Dv(),o!=null&&(this.originalName=xv(o),this.name=Tv(this.originalName)),this.rank=t.length}}let i3=0;class Pp{constructor(e,t){this.callArgs=t,this.id=i3++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const n of e.inboundLayers)n!=null&&n.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)t!=null?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let r3=0;class lt extends No{constructor(e={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=r3++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const n=this.getClassName();t=or(n)+"_"+Up(n)}if(this.name=t,this.trainable_=e.trainable==null?!0:e.trainable,e.inputShape!=null||e.batchInputShape!=null){let n;if(e.batchInputShape!=null)n=e.batchInputShape;else if(e.inputShape!=null){let i=null;e.batchSize!=null&&(i=e.batchSize),n=[i].concat(e.inputShape)}this.batchInputShape=n;let s=e.dtype;s==null&&(s=e.inputDType),s==null&&(s="float32"),this.dtype=s}e.weights!=null?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(this.inboundNodes.length===0)throw new oi(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new j(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return 
Jn(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return Jn(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new rr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new rr(`Layer ${this.name} is not connected, no input to return.`);return Jn(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new rr(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new rr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return Jn(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(e=>e())}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach(t=>t.trainable=e),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(e=>e.trainable):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(e=>!e.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Nt(e),this.inputSpec==null||this.inputSpec.length===0)return;const t=Nt(this.inputSpec);if(e.length!==t.length)throw new j(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. 
Input received: ${e}`);for(let n=0;ni.maxNDim)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${i.maxNDim}, found ndim=${o}`);if(i.minNDim!=null&&o=0?a[u]:a[a.length+u];if(p!=null&&[p,null].indexOf(m)===-1)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected axis ${u} of input shape to have value ${p} but got shape ${a}.`)}}if(i.shape!=null)for(let a=0;a{if(!this.built){this.assertInputCompatibility(e);const o=[];for(const a of Nt(e))o.push(a.shape);this.build(Jn(o)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&i&&(this._refCount=1)}if(this.assertInputCompatibility(e),i){let o=this.call(e,t);const a=Nt(o),c=[];for(let u of a)n.indexOf(u)!==-1&&(u=u.clone()),c.push(u);if(o=Jn(c),this.activityRegularizer!=null)throw new Ge("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return o}else{const o=o3(e),a=this.computeOutputShape(o);let c;const u=a3(e);if(this.warnOnIncompatibleInputShape(Array.isArray(e)?o[0]:o),a!=null&&a.length>0&&Array.isArray(a[0])?c=a.map((p,m)=>new li(u,p,this,Nt(e),t,this.name,m)):c=new li(u,a,this,Nt(e),t,this.name),this.addInboundNode(e,c,null,null,o,a,t),this._refCount++,this.activityRegularizer!=null)throw new Ge("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}})}warnOnIncompatibleInputShape(e){if(this.batchInputShape==null)return;if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach((n,s)=>{n!=null&&e[s]!=null&&e[s]!==n&&(t=!0)}),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new rr(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);e.indexOf(n)===-1&&e.push(n)}if(e.length===1){const t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&t.length===1?t[0]:t}else throw new rr(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new oi(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Mp(this.weights)}build(e){this.built=!0}getWeights(e=!1){return Cw(e?this.trainableWeights:this.weights)}setWeights(e){ee(()=>{const t=this.weights;if(t.length!==e.length)throw new j(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(t.length===0)return;const n=[],s=Cw(t);for(let i=0;ii.apply(u.read())),o==null&&(o=!0),o?this._trainableWeights.push(u):this._nonTrainableWeights.push(u),u}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){if(e==null||Array.isArray(e)&&e.length===0)return;e=Nt(e),this._losses!==void 0&&this._losses!==null&&this.losses.push(...e)}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(t!=null)if(Array.isArray(t))t.forEach(n=>{if(n!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return t}addInboundNode(e,t,n,s,i,o,a=null){const c=Nt(e);t=Nt(t),n=Nt(n),s=Nt(s),i=Bp(i),o=Bp(o);const u=[],p=[],m=[];for(const y of c)u.push(y.sourceLayer),p.push(y.nodeIndex),m.push(y.tensorIndex);new Pp({outboundLayer:this,inboundLayers:u,nodeIndices:p,tensorIndices:m,inputTensors:c,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:i,outputShapes:o},a);for(let y=0;ye.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return--this._refCount===0&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function o3(e){e=Nt(e);const t=[];for(const n of e)t.push(n.shape);return Jn(t)}function a3(e){return"float32"}function Fv(e,t,n){if((t==null||n!=null&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),t.inboundNodes.length===0)return[e];{const s=t.inboundNodes[n];if(s.inboundLayers.length===0)return s.inputTensors;{const i=[];for(let o=0;o0){const i=await Promise.all(t);for(let o=0;obe(this.totals[s],X(i,n)));this.totals[s]=a,o!=null&&o.dispose()}}}async onEpochEnd(e,t){if(t!=null)for(const n of this.params.metrics){if(this.totals[n]==null)continue;typeof this.totals[n]=="number"?t[n]=this.totals[n]/this.seen:ee(()=>{const s=X(_e(1,this.seen),this.totals[n]);t[n]=s,this.totals[n].dispose(),Rn(t[n])})}}}class Bv extends ic{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){t==null&&(t={}),this.epoch.push(e);for(const n in t)this.history[n]==null&&(this.history[n]=[]),this.history[n].push(t[n])}async syncData(){const e=[],t=[],n=[];for(const i in this.history){const o=this.history[i];for(let a=0;anew Mv(s,t))}class zs{constructor(){}static registerCallbackConstructor(e,t){k(e>=0&&Number.isInteger(e),()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`),zs.checkForDuplicate(t),zs.constructors[e]==null&&(zs.constructors[e]=[]),zs.constructors[e].push(t)}static checkForDuplicate(e){for(const t in zs.constructors){const n=zs.constructors[+t];n.forEach(s=>{if(s===e)throw new j("Duplicate callback constructor.")})}}static clear(){zs.constructors={}}static createCallbacks(e){const t=[];for(const n in zs.constructors){const s=+n;e>=s&&t.push(...zs.constructors[s])}return t.map(n=>new n)}}zs.constructors={};function zv(e,t,n,s,i,o,a,c,u){const p=new Bv,m=[new l3,...zs.createCallbacks(t)];e!=null&&m.push(...e),m.push(p);const y=new Uv(m);return y.setParams({epochs:n,initialEpoch:s,samples:i,steps:o,batchSize:a,verbose:t,doValidation:c,metrics:u}),{callbackList:y,history:p}}function 
hi(e,t={},n=!1){return Dh(e,Ws.getMap().classNameMap,t,"layer",n)}function zp(e,t){return ee(()=>{e.dtype!=="float32"&&(e=e.asType("float32"));const n=Ue($h(e),t,!0),s=hh(n.shape,cn()),i=Sn(Us(n,s));return _e(e,i)})}function ar(e,t){return ee(()=>zt($h(Ce(t,e)),-1))}function rc(e,t){return ee(()=>zt(rn(Ce(t,e)),-1))}function Kr(e,t){return ee(()=>{const n=Ce(e,t),s=jn(rn(e),cn(),Number.MAX_VALUE),i=rn(_e(n,s));return X(100,zt(i,-1))})}function Ow(e,t){return ee(()=>{const n=jn(t,cn(),Number.MAX_VALUE),s=is(be(1,n)),i=jn(e,cn(),Number.MAX_VALUE),o=is(be(1,i));return zt($h(Ce(s,o)),-1)})}function h3(e,t){return ee(()=>{const n=Us(0,Ce(1,X(e,t)));return zt($h(n),-1)})}function u3(e,t){return ee(()=>{const n=Us(0,Ce(1,X(e,t)));return zt(n,-1)})}function d3(e,t){return ee(()=>{const n=Ue(X(e,t),-1),s=Xn(X(Ce(1,e),t),-1);return Us(0,be(1,Ce(s,n)))})}function p3(e,t){return ee(()=>{const n=Math.log(2),s=Ce(t,e),i=Ce(be(s,Va(X(-2,s))),n);return zt(i,-1)})}function Bh(e,t,n=!1){return ee(()=>{if(n)t=Uo(t);else{const s=Ue(t,t.shape.length-1,!0);t=_e(t,s)}return t=jn(t,cn(),1-cn()),Pt(Ue(X(e.toFloat(),is(t)),t.shape.length-1))})}function Gp(e,t,n=!1){return ee(()=>{const s=Pa(Oz(e)).toInt();t=jn(t,cn(),1-cn());const i=t.shape,o=vo(s,i[i.length-1]).reshape(i);return Bh(o,t,n)})}function m3(e,t){if(!ot(e.shape,t.shape))throw new j(`logits and labels must have the same shape, but got shapes ${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return ee(()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())})}function Vp(e,t){return ee(()=>{let n;return n=jn(t,cn(),1-cn()),n=is(_e(n,Ce(1,n))),zt(m3(e,n),-1)})}function Ew(e,t){return ee(()=>{const n=jn(e,cn(),1),s=jn(t,cn(),1);return Ue(X(e,is(_e(n,s))),-1)})}function f3(e,t){return ee(()=>{const n=is(be(cn(),t));return zt(Ce(t,X(e,n)),-1)})}function Hp(e,t){return ee(()=>{const n=zp(e,-1),s=zp(t,-1),i=X(n,s);return Pt(Ue(i,-1))})}const dee=ar,pee=ar,mee=rc,fee=rc,gee=Kr,yee=Kr,bee=Ow,wee=Ow,Lee=Ew,See=Ew,Iee=Hp,Yp={meanSquaredError:ar,meanAbsoluteError:rc,meanAbsolutePercentageError:Kr,meanSquaredLogarithmicError:Ow,squaredHinge:h3,hinge:u3,categoricalHinge:d3,logcosh:p3,categoricalCrossentropy:Bh,sparseCategoricalCrossentropy:Gp,binaryCrossentropy:Vp,kullbackLeiblerDivergence:Ew,poisson:f3,cosineProximity:Hp};function Dw(e){if(typeof e=="string"){if(e in Yp)return Yp[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new j(t)}else return e}function kw(e,t){return ee(()=>{const n=X(.5,Dn(t)),s=_h(Ts(t,n),e.dtype);return zt(ni(e,s),-1)})}function Fw(e,t){return ee(()=>_h(ni(nh(e,-1),nh(t,-1)),"float32"))}function Gv(e,t){return ee(()=>Bs(e.equal(1),t.equal(1)).sum().cast("float32"))}function g3(e,t){return ee(()=>Bs(e.equal(1),t.equal(0)).sum().cast("float32"))}function y3(e,t){return ee(()=>Bs(e.equal(0),t.equal(1)).sum().cast("float32"))}function Vv(e,t){return ee(()=>{const n=Gv(e,t),s=y3(e,t),i=n.add(s);return $n(Ts(i,0),n.div(i),0).cast("float32")})}function b3(e,t){return ee(()=>{const n=Gv(e,t),s=g3(e,t),i=n.add(s);return $n(Ts(i,0),n.div(i),0).cast("float32")})}function Hv(e,t){return Vp(e,t)}function Yv(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),t=t.argMax(-1),t.dtype!==e.dtype&&(t=t.asType(e.dtype)),ni(e,t).asType("float32")}function xee(e,t){throw new Ge}function Tee(e,t){throw new Ge}const w3=ar,L3=ar,S3=rc,I3=rc,x3=Kr,T3=Kr,_w=Bh,A3=Hp,qv=Gp,qp={binaryAccuracy:kw,categoricalAccuracy:Fw,precision:Vv,categoricalCrossentropy:_w,sparseCategoricalCrossentropy:qv,mse:w3,MSE:L3,mae:S3,MAE:I3,mape:x3,MAPE:T3,cosine:A3};function v3(e){if(typeof e=="string"&&e in qp)return qp[e];if(typeof e!="string"&&e!=null)return e;throw new j(`Unknown metric ${e}`)}function jp(e){if(vs(e!==null,`Unknown LossOrMetricFn ${e}`),typeof e=="string")return e;{let t;for(const n of Object.keys(Yp))if(Yp[n]===e){t=n;break}if(t!==void 0)return t;for(const n of Object.keys(qp))if(qp[n]===e){t=n;break}return t!==void 0?t:e.name}}function N3(e){const t={Adagrad:()=>Mo.adagrad(.01),Adadelta:()=>Mo.adadelta(1,.95,cn()),Adam:()=>Mo.adam(.001,.9,.999,cn()),Adamax:()=>Mo.adamax(.002,.9,.999,cn(),0),RMSProp:()=>Mo.rmsprop(.001,.9,0,cn()),SGD:()=>Mo.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new j(`Unknown Optimizer ${e}`)}const jv=1*1024*1024;function Kv(e,t,n=!1){if(e==null||typeof e!="object"||Object.getPrototypeOf(e)!==Object.prototype||!Ww(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const s=JSON.stringify(e);s.length>jv&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${jv}.`)}}function Ww(e){if(e===null)return!0;if(typeof e=="object")if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if(typeof n!="string")return!1;if(!Ww(e[n]))return!1}return!0}else if(Array.isArray(e)){for(const t of e)if(!Ww(t))return!1;return!0}else return!1;else{const t=typeof e;return t==="string"||t==="number"||t==="boolean"}}function C3(e,t,n,s=console.log){const i=O3(e),o=["Layer (type)","Output shape","Param #"];i?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map(m=>Math.floor(t*m)));let a;if(!i){o.push("Receives inputs"),a=[];for(const m in e.nodesByDepth)a.push(...e.nodesByDepth[m])}s("_".repeat(t)),Kp(o,n,s),s("=".repeat(t));const c=e.layers;for(let m=0;m1||i.length===1&&i[0].inboundLayers.length>1){t=!1;break}s.push(...i)}if(t)for(const i of e.layers){let o=!1;for(const a of i.inboundNodes)if(s.indexOf(a)!==-1)if(o){t=!1;break}else o=!0;if(!t)break}return t}function Kp(e,t,n=console.log){let s="";for(let i=0;i0&&(s=s.slice(0,s.length-1)+" "),s+=e[i],s=s.slice(0,t[i]),s+=" ".repeat(t[i]-s.length);n(s)}function E3(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(c){s="multiple"}const i=e.name,o=e.getClassName(),a=[`${i} (${o})`,s,e.countParams().toString()];Kp(a,t,n)}function D3(e,t,n,s){let i;try{i=JSON.stringify(e.outputShape)}catch(m){i="multiple"}const o=[];for(const m of e.inboundNodes){if(n!=null&&n.length>0&&n.indexOf(m)===-1)continue;for(let y=0;yI.name),u=[],p=t.names();for(const I of c)p.indexOf(I)!==-1?u.push(t.getValue(I)):u.push(null);s!=null&&(s.maxNumTensors=-Infinity,s.minNumTensors=Infinity);const m=c.join(",")+"|"+t.names().join(",");let y,b;if(Uw[m]==null){const I=F3(a,t);y=I.sorted,b=I.recipientCounts,Uw[m]=y,Jv[m]=b}y=Uw[m],b={},i||Object.assign(b,Jv[m]);const w=new Ho(t);for(let I=0;Is.maxNumTensors&&(s.maxNumTensors=q),q0,()=>"Expected at least one fetch, got none");let n=[],s={};if(e.length===1){const i=Zv(e[0],t);n=i.sorted,s=i.recipientMap}else{const i=new Set;for(const o of e){const{sorted:a,recipientMap:c}=Zv(o,t);for(const u of a)i.has(u.name)||(n.push(u),i.add(u.name));for(const u in c)s[u]==null&&(s[u]=new Set),c[u].forEach(p=>s[u].add(p))}}return{sorted:n,recipientCounts:_3(s)}}function _3(e){const t={};for(const n in e)t[n]=e[n].size;return t}function Zv(e,t){const n=new Set,s=[],i={};for(const c of t.names())n.add(c);const o=[],a=[];for(o.push(e);o.length>0;){const c=o[o.length-1];if(n.has(c.name)){o.pop();continue}const u=a[a.length-1]===o.length-1;if(c.inputs.length===0||u)o.pop(),s.push(c),n.add(c.name),u&&a.pop();else{a.push(o.length-1);for(const p of c.inputs){if(i[p.name]==null&&(i[p.name]=new Set),i[p.name].add(c.name),n.has(p.name))continue;o.push(p)}}}return{sorted:s,recipientMap:i}}function W3(e){let t;if(e.sourceLayer.inboundNodes.length===1)t=e.sourceLayer.output;else{let n=null;for(let s=0;sN.name)}`);Hr(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(N=>N.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const N of this.outputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;this.outputLayers.push(E),this.outputLayersNodeIndices.push(D),this.outputLayersTensorIndices.push(F)}for(const N of this.inputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;vs(D===0,"input layer has >1 nodes"),vs(F===0,"input layer has >1 tensors"),this.inputLayers.push(E),this.inputLayersNodeIndices.push(D),this.inputLayersTensorIndices.push(F)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let N=0;NN.shape),this.internalOutputShapes=this.outputs.map(N=>N.shape);const t={},n={},s={},i={},o={},a=[],c=(N,E,D,F,_,B)=>{(F==null||_==null||B==null)&&(F=N.sourceLayer,_=N.nodeIndex,B=N.tensorIndex);const U=F.inboundNodes[_];if(D.indexOf(U)!==-1)throw new oi(`The tensor ${N.name} at layer "${F.name}" is part of a cycle.`);if(E.indexOf(U)!==-1)return;this.containerNodes.add(Ui.nodeKey(F,_)),F.id in o||(o[F.id]=Object.keys(o).length),D.indexOf(U)===-1&&D.push(U);const Y=U.inboundLayers.length;for(let q=0;q=0;)D.splice(D.indexOf(U),1);a.push(U)},u=[],p=[];for(const N of this.outputs)c(N,u,p);const m=a.slice().reverse();for(const N of m){n[N.id]=N,N.id in t||(t[N.id]=0);let E=t[N.id];const D=s[N.outboundLayer.id]==null?0:s[N.outboundLayer.id];E=Math.max(E,D),s[N.outboundLayer.id]=E,i[N.outboundLayer.id]=N.outboundLayer,t[N.id]=E;for(let F=0;FparseInt(N,10)).sort(vp);this.layers=[];for(const N of w){const E=b[N];E.sort((D,F)=>{const _=o[D.id],B=o[F.id];return _B?1:0});for(const D of E)D instanceof Ui&&this.internalContainerRefs.push(D),this.layers.push(D)}this.layersByDepth=b,w=Object.keys(y).map(N=>parseInt(N,10)).sort(vp);const I=this.inputs.slice(),T=[];for(const N of w)for(const E of y[N]){const D=E.outboundLayer;if(D!=null){for(const F of E.inputTensors)if(I.indexOf(F)===-1)throw new oi(`Graph disconnected: cannot obtain value for tensor ${F} at layer "${D.name}". The following previous layers were accessed without issue: ${T}`);for(const F of E.outputTensors)I.push(F);T.push(D.name)}}this.nodesByDepth=y;const v=this.layers.map(N=>N.name);for(const N of v){const E=v.filter(D=>D===N).length;if(E!==1)throw new oi(`The name "${N}" is used ${E} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(v))}this.outboundNodes=[],this.inboundNodes=[],new Pp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(N=>null),outputMasks:this.outputs.map(N=>null),inputShapes:this.inputs.map(N=>N.shape),outputShapes:this.outputs.map(N=>N.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach(t=>{t._trainableWeights.forEach(n=>n.trainable=e)}),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new j("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const n of this.layers)t.push(...n.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const o of this.layers)for(const a of o.weights){if(n[a.originalName]!=null)throw new j(`Duplicate weight name: ${a.originalName}`);n[a.originalName]=a,s++}const i=[];for(const o in e){let a=o;if(n[o]==null){const c=o.split("/"),u=c.slice(0,-2).concat([c[c.length-1]]);a=u.join("/")}if(n[a]!=null)i.push([n[a],e[o]]);else if(t)throw new j(`Provided weight data has no target variable: ${o}`);delete n[a]}if(t){const o=[];for(const a in n)o.push(a);if(o.length>0)throw new j(`${o.length} of ${s} weights are not set: ${o}`)}Rw(i)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion=`tfjs-layers ${Xp}`,t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=$w(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return ee(()=>{e=Nt(e);const n=new Ho;for(let s=0;s{e=Nt(e);let n;return t==null?n=Po(null,e.length):n=Nt(t),this.runInternalGraph(e,n)[1]})}computeOutputShape(e){const t=Bp(e);if(t.length!==this.inputLayers.length)throw new j(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let a=0;aparseInt(a,10)).sort(vp);if(s.length>1)for(const a of s){const c=this.nodesByDepth[a];for(const u of c){const p=u.outboundLayer;if(this.inputLayers.map(I=>I.id).indexOf(p.id)!==-1)continue;const m=[];for(let I=0;IparseInt(c,10)).sort(vp);for(const c of s){const u=this.nodesByDepth[c];for(const p of u){const m=p.outboundLayer,y=p.inputTensors,b=p.outputTensors,w=new Array;for(const I of y)I.id in n&&w.push(n[I.id]);if(w.length===y.length){let I={},T,v,N,E;if(p.callArgs!=null&&(I=p.callArgs),w.length===1){const[D,F]=w[0];I.mask==null&&(I.mask=F),N=Nt(m.call(D,I)),E=Nt(m.computeMask(D,F)),T=[D],v=[F]}else 
T=w.map(D=>D[0]),v=w.map(D=>D[1]),I.mask==null&&(I.mask=v),N=Nt(m.call(T,I)),E=Nt(m.computeMask(T,v));if(m.activityRegularizer)throw new Ge("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let D=0;D{const e=[];for(const t of this.layers)for(let n=0;n0){const I=[];for(let T=0;T0&&T.apply(Jn(N),E)}function u(T){const v=T.name,N=hi(T,t.customObjects!=null?t.customObjects:{});N.setFastWeightInitDuringBuild(s),i[v]=N;const E=T.inboundNodes;E.forEach(D=>{if(!(D instanceof Array))throw new j(`Corrupted configuration, expected array for nodeData: ${D}`);a(N,D)})}const p=t.name,m=t.layers;for(const T of m)u(T);for(;!pz(o);)for(const T of m){const v=i[T.name];if(v.name in o){const N=o[v.name];delete o[v.name];for(const E of N)c(v,E)}}const y=[],b=[],w=t.inputLayers;for(const T of w){const v=T[0],N=T[1],E=T[2];vs(v in i);const D=i[v],F=D.inboundNodes[N].outputTensors;y.push(F[E])}const I=t.outputLayers;for(const T of I){const v=T[0],N=T[1],E=T[2];vs(v in i);const D=i[v],F=D.inboundNodes[N].outputTensors;b.push(F[E])}return new e({inputs:y,outputs:b,name:p})}get stateful(){if(this._stateful)throw new j("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){ee(()=>{this.layers.forEach(e=>{e.stateful&&e.resetStates()})})}}function Qv(e,t,n){const s=t.length;if(e==null||Array.isArray(e)&&e.length===0)return t.map(i=>null);if(s===1)return Array.isArray(e)&&e.length===1?e:typeof e=="object"&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}else if(typeof e=="object"&&Object.keys(e).length>0&&typeof e[Object.keys(e)[0]]=="object"){const i=[];return t.forEach(o=>{o in e?i.push(e[o]):i.push(null)}),i}else throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. Provided ${n} not understood: ${JSON.stringify(e)}`)}function eN(e,t){return Qv(e,t,"classWeight")}function Aee(e,t){return Qv(e,t,"sampleWeight")}async function tN(e,t,n,s){if(t!=null||s!=null)throw new Error("Support sampleWeight is not implemented yet");if(n!=null){const i=ee(()=>{if(e.shape.length===1)return e.clone();if(e.shape.length===2)if(e.shape[1]>1){const c=1;return e.argMax(c)}else{if(e.shape[1]===1)return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),o=Array.from(await i.data());qe(i);const a=[];return o.forEach(c=>{if(n[c]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${c} exists in the data but not in classWeight`);a.push(n[c])}),rs(a,"float32")}else return null}function $3(e,t){return X(e,t)}const U3=32;function nN(e,t){let n,s;const i=t;n=i.xs,s=i.ys,k(n!=null&&s!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. 
The provided Dataset instead generates ${t}`);const o=sN("input",e.inputNames,n),a=sN("output",e.outputNames,s),c=o[0].shape[0];k(o.length===e.inputs.length,()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${o.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`),k(a.length===e.outputs.length,()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${a.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`);for(let u=0;u`Batch size mismatch: input ${e.inputNames[u]} has ${o[u].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);for(let u=0;u`Batch size mismatch: output ${e.outputNames[u]} has ${a[u].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);return{xs:o,ys:a}}function sN(e,t,n){if(n instanceof Q)return[n];if(Array.isArray(n))return k(n.length===t.length,()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`),n;{const s=[];for(const i of t){if(n[i]==null)throw new j(`The feature data generated by the dataset lacks the required ${e} key '${i}'.`);s.push(n[i])}return s}}function B3(e){if(e.length===3)throw new Ge("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}async function M3(e,t,n){const s=n.batchesPerEpoch!=null;if(k(e.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),k(n!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),k(n.epochs!=null&&n.epochs>0&&Number.isInteger(n.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`),k(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`),k(n.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const i=n.validationData!=null;let o,a;if(i)if(iN(n.validationData))k(n.validationBatches==null||n.validationBatches>0&&Number.isInteger(n.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`);else{const v=B3(n.validationData);o=v.xs,a=v.ys}const c=e.makeTrainFunction(),u=e.getDedupedMetricsNames();let p;i?p=u.slice().concat(u.map(v=>"val_"+v)):p=u.slice();const m=Pv(n.callbacks,n.yieldEvery),y=n.verbose==null?1:n.verbose,{callbackList:b,history:w}=zv(m,y,n.epochs,null,null,P3(t,n),null,i,p);b.setModel(e),e.history=w,await b.onTrainBegin(),e.stopTraining_=!1;let I=n.initialEpoch==null?0:n.initialEpoch,T=await t.iterator();for(;I=n.batchesPerEpoch:D.done){if(i){let F;iN(n.validationData)?F=Nt(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):F=Nt(e.evaluate(o,a,{batchSize:n.validationBatchSize==null?U3:n.validationBatchSize,verbose:0}));for(let _=0;_0)throw new Ge("Verbose mode is not implemented yet.");k(!s||n.batches>0&&Number.isInteger(n.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`);const a=z3(t)?t:await t.iterator();let c=0,u=0;for(;s?u{if(p.value){const{xs:m,ys:y}=nN(e,p.value),b=m.concat(y),w=ee(()=>i(b));if(qe(b),u===0)for(let T=0;Tbe(o[T],X(I,v))),u>0&&qe(N)}qe(w),c+=I,++u}return o}),p.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let p=0;p0&&Number.isInteger(e),()=>`batchSize is required to be a positive integer, but got ${e}`)}function zh(e,t,n){return e==null?[null]:Array.isArray(e)?e.map(s=>Vo(s,t,n-t)):Vo(e,t,n-t)}function Mw(e,t){return ee(()=>e==null?null:Array.isArray(e)?e.map(n=>Mw(n,t)):Cv(e,t.dtype==="int32"?t:t.toInt()))}function Pw(e,t){const n=[];let s=0,i=null;for(;s=e&&(i=e),n.push([s,i]),s=i;return n}async function V3(e,t,n,s,i,o,a,c,u,p,m,y,b,w,I){i==null&&(i=32),o==null&&(o=1),m==null&&(m=!0),b==null&&(b=0);let T=!1;if(u!=null&&p!=null&&(T=!0),I!=null&&(T=!0,w==null))throw new j("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const v=e.checkNumSamples(n,i,w,"steps_per_epoch");let N;v!=null&&(N=ai(0,v)),a==null&&(a=1);const{callbackList:E,history:D}=zv(c,a,o,b,v,w,i,T,y);E.setModel(e),e.history=D,await E.onTrainBegin(),e.stopTraining_=!1;for(let F=b;F{const J=U[Y][0],oe=U[Y][1],ce=Vo(B,J,oe-J);q.batch=Y,q.size=oe-J;const ue=Mw(n,ce),he=t(ue);for(let pe=0;pe0){if(I=!0,s.validationData.length===2)a=s.validationData[0],c=s.validationData[1];else throw s.validationData.length===3?new Ge("validationData including sample weights is not supported yet."):new j(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);const U=!0,Y=await e.standardizeUserData(a,c,null,null,U,y);u=Y[0],p=Y[1],T=u.concat(p)}else if(s.validationSplit!=null&&s.validationSplit>0&&s.validationSplit<1){I=!0;const U=Math.floor(i[0].shape[0]*(1-s.validationSplit)),Y=i[0].shape[0];u=zh(i,U,Y),i=zh(i,0,U),p=zh(o,U,Y),o=zh(o,0,U),T=u.concat(p)}else s.validationSteps!=null&&(I=!0);const v=i.concat(o).concat(m);e.checkTrainableWeightsConsistency();const N=e.makeTrainFunction(),E=e.getDedupedMetricsNames();let D,F;I?(e.makeTestFunction(),D=e.testFunction,F=E.slice().concat(E.map(U=>"val_"+U))):(D=null,T=[],F=E.slice());const _=Pv(s.callbacks,s.yieldEvery),B=await V3(e,N,v,E,y,s.epochs,s.verbose,_,D,T,s.shuffle,F,s.initialEpoch,null,null);return B}finally{e.isTraining=!1,Yo(i,t),Yo(o,n),Yo(u,a),Yo(p,c),m!=null&&qe(m)}}function rN(e){const t=[];e instanceof Q&&(e=[e]);for(let n=0;nn.push(i.id));else if(t!=null)for(const i in t){const o=t[i];n.push(o.id)}const s=[];if(e instanceof Q)n.indexOf(e.id)===-1&&s.push(e);else if(Array.isArray(e))e.forEach(i=>{n.indexOf(i.id)===-1&&s.push(i)});else if(e!=null)for(const i in e){const o=e[i];n.indexOf(o.id)===-1&&s.push(o)}s.forEach(i=>{i.isDisposed||i.dispose()})}function Y3(e){return e instanceof Q}function zw(e){return Array.isArray(e)}function oN(e){return!Y3(e)&&!zw(e)}function aN(e,t,n,s=!0,i=""){if(t==null||t.length===0){if(e!=null){let a=!1;if(zw(e)&&e.length>0)a=!0;else if(oN(e)){for(const c in e)if(e.hasOwnProperty(c)){a=!0;break}}else a=!0;if(a)throw new j(`Error when checking model ${i} expected no data, but got ${e}`)}return[]}if(e==null)return t.map(a=>null);let o;if(oN(e)){e=e,o=[];for(const a of t){if(e[a]==null)throw new j(`No data provided for "${a}". Need data for each key in: ${t}`);o.push(e[a])}}else if(zw(e)){if(e=e,e.length!==t.length)throw new j(`Error when checking model ${i}: the Array of Tensors that you are passing to your model is not the size the model expected. 
Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);o=e}else{if(e=e,t.length>1)throw new j(`The model ${i} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);o=[e]}if(o=rN(o),n!=null)for(let a=0;a=0&&p!==m)throw new j(`Error when checking ${i}: expected ${t[a]} to have shape [${n[a]}], but got array with shape [${c.shape}].`)}}return o}function q3(e,t,n){const s=Hr(e.map(o=>o.shape[0]));s.sort();const i=Hr(t.map(o=>o.shape[0]));if(i.sort(),s.length>1)throw new j(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map(o=>o.shape))}`);if(i.length>1)throw new j(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(o=>o.shape))}`);if(s.length>0&&i.length>0&&!ot(s,i))throw new j(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${i[0]} target sample(s).`)}function j3(e,t,n){const s=[ar,Vp,Bh];for(let i=0;i1)throw new j(`The model expects ${t.length} ${i} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);o=[e]}if(n!=null)for(let a=0;a[]);let n;if(typeof e=="string"||typeof e=="function")n=[e];else if(Array.isArray(e)||typeof e=="object")n=e;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);if(Array.isArray(n))return t.map(s=>n);{const s=[];for(const i of t){let o=n.hasOwnProperty(i)?n[i]:[];Array.isArray(o)||(o=[o]),s.push(o)}return s}}const X3="layers-model";class cr extends Ui{constructor(e){super(e);this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new j("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");C3(this,e,t,n)}compile(e){if(e.loss==null&&(e.loss=[]),this.loss=e.loss,typeof e.optimizer=="string")this.optimizer_=N3(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof sr))throw new j("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(!Array.isArray(e.loss)&&typeof e.loss!="string"&&typeof e.loss!="function"){e.loss=e.loss;for(const o in e.loss)if(this.outputNames.indexOf(o)===-1)throw new j(`Unknown entry in loss dictionary: "${o}". Only expected the following keys: ${this.outputNames}`);for(const o of this.outputNames)e.loss[o]==null&&console.warn(`Output "${o}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${o} during training`),t.push(Dw(e.loss[o]))}else if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new j(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const o=e.loss;t=o.map(a=>Dw(a))}else{const o=Dw(e.loss);this.outputs.forEach(a=>{t.push(o)})}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let o=0;o{for(let o=0;o1&&(this.metricsTensors.push([a,o]),this.metricsNames.push(this.outputNames[o]+"_loss"))}});const s=K3(e.metrics,this.outputNames),i=(o,a,c)=>{this.outputNames.length>1&&(a=this.outputNames[o]+"_"+a),this.metricsNames.push(a),this.metricsTensors.push([c,o])};Go("metric",()=>{for(let o=0;o{const p="";let m,y,b;for(const w of u){if(typeof w=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(w)!==-1){const T=this.internalOutputShapes[o];T[T.length-1]===1||this.lossFunctions[o]===Vp?["accuracy","acc"].indexOf(w)!==-1?y=kw:["crossentropy","ce"].indexOf(w)!==-1&&(y=Hv):this.lossFunctions[o]===Gp?["accuracy","acc"].indexOf(w)!==-1?y=Yv:["crossentropy","ce"].indexOf(w)!==-1&&(y=qv):["accuracy","acc"].indexOf(w)!==-1?y=Fw:["crossentropy","ce"].indexOf(w)!==-1&&(y=_w);let v;["accuracy","acc"].indexOf(w)!==-1?v="acc":["crossentropy","ce"].indexOf(w)!==-1&&(v="ce"),b=y,m=p+v}else{const T=v3(w);b=T,m=p+jp(w)}let I;Go(m,()=>{I=b}),i(o,m,I)}};c(a)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=n.batchSize==null?32:n.batchSize;Bw(s);const i=!0,o=this.standardizeUserDataXY(e,t,i,s);try{const a=o[0].concat(o[1]);this.makeTestFunction();const c=this.testFunction,u=this.testLoop(c,a,s,n.verbose,n.steps);return Jn(u)}finally{Yo(o[0],e),Yo(o[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),G3(this,e,t)}checkNumSamples(e,t,n,s="steps"){let i;if(n!=null){if(i=null,t!=null)throw new j(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else if(e!=null)Array.isArray(e)?i=e[0].shape[0]:i=e.shape[0];else throw new j(`Either the input data should have a defined shape, or ${s} shoud be specified.`);return i}execute(e,t){if(Array.isArray(t)&&t.length===0)throw new j("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],i=this.retrieveSymbolicTensors(s),o=new Ho;if(e instanceof Q&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new j(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let c=0;ca.name);for(let a=0;a0){const s=[];throw t.forEach((i,o)=>{i==null&&s.push(e[o])}),new j(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return t}predictLoop(e,t=32,n=!1){return ee(()=>{const s=this.checkNumSamples(e);if(n)throw new Ge("Verbose predictLoop() is not implemented yet.");const i=Pw(s,t),o=this.outputs.map(a=>[]);for(let a=0;a{const u=i[a][0],p=i[a][1],m=zh(e,u,p),y=[];if(Array.isArray(m))for(let w=0;wo[p].push(u))}return Jn(o.map(a=>Mt(a,0)))})}predict(e,t={}){const n=rN(e);cN(n,this.inputNames,this.feedInputShapes,!1);try{const s=t.batchSize==null?32:t.batchSize;return Bw(s),this.predictLoop(n,s)}finally{Yo(n,e)}}predictOnBatch(e){cN(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return 
this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(this.optimizer_==null)throw new oi("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const i=[];for(let o=0;o0&&e[0].shape[0]%s!==0)throw new j(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,i=!0,o){const[a,c]=this.standardizeUserDataXY(e,t,i,o);if(n!=null)throw new Error("sample weight is not supported yet.");let u=null;if(s!=null){const p=eN(s,this.outputNames);u=[];for(let m=0;m{const o=this.checkNumSamples(t,n,i,"steps"),a=[];if(s>0)throw new Ge("Verbose mode is not implemented yet.");if(i!=null)throw new Ge("steps mode in testLoop() is not implemented yet");{const c=Pw(o,n),u=rs(ai(0,o));for(let p=0;p1){const o=gv(e.slice(0,n),s);i+=`_${o}`}t.push(i)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),o=[],a=()=>{const m=[];for(let I=0;I1&&I{w=be(w,I)}),w},c=this.collectedTrainableWeights.map(m=>m.read()),u=!0,p=this.optimizer_.minimize(a,u,c);return[p].concat(o)}}makeTestFunction(){this.testFunction=e=>ee(()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),i=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),o=[];for(let u=0;uor(t))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t)if(typeof n[s]=="string")e[s]=or(n[s]);else throw new Error("Serialization of non-string loss is not supported.")}return e}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[or(jp(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(e=>or(jp(e)));{const e={};for(const t in this.metrics)e[t]=or(jp(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(e.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(e.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(e.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");const t=Mh(e.optimizer_config),n=hi(t);let s;if(typeof e.loss=="string")s=zo(e.loss);else if(Array.isArray(e.loss))s=e.loss.map(o=>zo(o));else if(e.loss!=null){s={};for(const o in e.loss)s[o]=zo(e.loss[o])}let i;if(Array.isArray(e.metrics))i=e.metrics.map(o=>zo(o));else if(e.metrics!=null){i={};for(const o in e.metrics)i[o]=zo(e.metrics[o])}this.compile({loss:s,metrics:i,optimizer:n})}async save(e,t){if(typeof e=="string"){const u=Uy(e);if(u.length===0)throw new j(`Cannot find any save handlers for URL '${e}'`);if(u.length>1)throw new j(`Found more than one (${u.length}) save handlers for URL '${e}'`);e=u[0]}if(e.save==null)throw new j("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await Wy(this.getNamedWeights(t)),s=!1,i=null,o=this.toJSON(i,s),a={modelTopology:o,format:X3,generatedBy:`TensorFlow.js tfjs-layers v${Xp}`,convertedBy:null},c=t==null?!1:t.includeOptimizer;if(c&&this.optimizer!=null){a.trainingConfig=this.getTrainingConfig();const u="optimizer",{data:p,specs:m}=await 
Wy(await this.optimizer.getWeights(),u);n.specs.push(...m),n.data=Od([n.data,p])}if(this.userDefinedMetadata!=null){const u=!0;Kv(this.userDefinedMetadata,this.name,u),a.userDefinedMetadata=this.userDefinedMetadata}return a.weightData=n.data,a.weightSpecs=n.specs,e.save(a)}setUserDefinedMetadata(e){Kv(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}cr.className="Model",ge(cr);class lN extends cr{}lN.className="Functional",ge(lN);async function J3(e,t){"modelTopology"in e||(e={modelTopology:e}),e=e;let n=e.modelTopology;n.model_config!=null&&(n=n.model_config);const s=Mh(n),i=hi(s,t);if(e.weightsManifest!=null){const o=await _T(e.weightsManifest,e.pathPrefix,i.weights.map(c=>c.originalName)),a={};for(const c of i.weights)a[c.originalName]=o[c.originalName];i.loadWeights(a),qe(o)}return i}async function Z3(e,t){if(t==null&&(t={}),typeof e=="string"){const n=By(e,t);if(n.length===0)n.push(kd(e,t));else if(n.length>1)throw new j(`Found more than one (${n.length}) load handlers for URL '${e}'`);e=n[0]}return Q3(e,void 0,t)}async function Q3(e,t,n){if(n==null&&(n={}),e.load==null)throw new j("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const s=await e.load();let i=s.modelTopology;i.model_config!=null&&(i=i.model_config);const o=n.strict==null?!0:n.strict,a=s.weightData!=null&&s.weightSpecs!=null&&o,c=hi(Mh(i),t,a),u=s.trainingConfig;if(u!=null&&c.loadTrainingConfig(u),s.userDefinedMetadata!=null&&c.setUserDefinedMetadata(s.userDefinedMetadata),s.weightData!=null){if(s.weightSpecs==null)throw new j("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:p,optimizerWeights:m}=eG(s.weightData,s.weightSpecs);c.loadWeights(p,o),c.optimizer!=null&&m.length>0&&await c.optimizer.setWeights(m),qe(p),qe(m.map(y=>y.tensor))}return c}function eG(e,t){const n=Rd(e,t),s={},i=[];return t.forEach(o=>{o.group==="optimizer"?i.push({name:o.name,tensor:n[o.name]}):s[o.name]=n[o.name]}),{modelWeights:s,optimizerWeights:i}}class oc extends cr{constructor(e){super({inputs:[],outputs:[]});if(e=e||{},this.trainable=!0,this.built=!1,this.name=e.name!=null?e.name:Up("sequential_"),e.layers!=null)for(const t of e.layers)this.add(t)}checkShape(e){const t=e.inboundNodes[0].outputTensors[0].shape;if(t.some(n=>n<0))throw new j(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof oc||e instanceof cr;let n;if(t){if(n=e,n.outputs.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(n.inputs.length!==1)throw new j("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(e.inboundNodes.length===0){if(e.batchInputShape==null)throw new j("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const s=_v({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(s)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(e.inboundNodes.length!==1)throw new j(`A layer added to a Sequential model must not already be connected somewhere else. 
LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(e.inboundNodes[0].outputTensors.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=Fv(this.outputs[0])}this.inboundNodes=[],new Pp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:Po(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(s=>s.shape),outputShapes:this.outputs[0].shape})}else{const s=e.apply(this.outputs[0]);if(Array.isArray(s))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[s],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return this.model==null&&this.build(),this.model.call(e,t)}build(e){if(It(e),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new cr({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){this.model==null&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return this.model==null&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return this.model==null&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async 
fitDataset(e,t){if(!this.built)throw new oi("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let i,o={};if(t instanceof Array){if(!(t[0].className!=null)||t[0].className==="Merge")throw new j("Legacy serialization format not supported yet.");i=t}else k(t.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),i=t.layers,delete t.layers,o=t;const a=new e(o);if(!(a instanceof oc))throw new Ge(`Sequential.fromConfig called on non-Sequential input: ${a}`);for(const c of i){const u=void 0,p=hi(c,u,s);s&&p.setFastWeightInitDuringBuild(!0),a.add(p)}return a}set stopTraining(e){if(this.model==null)throw new j("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(this.model==null)throw new j("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}oc.className="Sequential",ge(oc);function tG(e){return new cr(e)}function nG(e){return new oc(e)}function sG(e,t){return t==null&&(t={}),Z3(e,t)}function hN(e){return _v(e)}function iG(e,t){zs.registerCallbackConstructor(e,t)}class cs extends No{getConfig(){return{}}}class uN extends cs{apply(e,t=1){return Dz(e,t)}}uN.className="elu",ge(uN);class dN extends cs{apply(e){return rp(e)}}dN.className="selu",ge(dN);class pN extends cs{apply(e){return Fi(e)}}pN.className="relu",ge(pN);class mN extends cs{apply(e){return ee(()=>ko(6,Fi(e)))}}mN.className="relu6",ge(mN);class fN extends cs{apply(e){return e}}fN.className="linear",ge(fN);class gN extends cs{apply(e){return Ei(e)}}gN.className="sigmoid",ge(gN);class yN extends cs{apply(e){return Fz(e)}}yN.className="hardSigmoid",ge(yN);class bN extends cs{apply(e){return Va(e)}}bN.className="softplus",ge(bN);class wN extends cs{apply(e){return kz(e)}}wN.className="softsign",ge(wN);class LN extends cs{apply(e){return Ma(e)}}LN.className="tanh",ge(LN);class Gw extends cs{apply(e,t=-1){return Uo(e,t)}}Gw.className="softmax",ge(Gw);class SN extends cs{apply(e,t=-1){return Qd(e,t)}}SN.className="logSoftmax",ge(SN);class IN extends cs{apply(e,t=1){return ee(()=>Ei(e.mul(t)).mul(e))}}IN.className="swish",ge(IN);function Xr(e){return e.getClassName()}function Vw(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"activation")}function Jr(e){if(e==null){const t={};return t.className="linear",t.config={},Vw(t)}if(typeof e=="string"){const t={};return t.className=e,t.config={},Vw(t)}else return e instanceof cs?e:Vw(e)}function Hw(e){if(e!=null&&typeof e!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}class xN extends No{}class Gh extends xN{constructor(e){super();Hw(e),this.l1=e==null||e.l1==null?.01:e.l1,this.l2=e==null||e.l2==null?.01:e.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(e){return ee(()=>{let t=ct([1]);return this.hasL1&&(t=be(t,Ue(X(this.l1,rn(e))))),this.hasL2&&(t=be(t,Ue(X(this.l2,$h(e))))),t.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}Gh.className="L1L2",ge(Gh);function rG(e){return Hw(e),new Gh({l1:e!=null?e.l1:null,l2:0})}function oG(e){return Hw(e),new 
Gh({l2:e!=null?e.l2:null,l1:0})}const TN={l1l2:"L1L2"};function xt(e){return cw(e)}function AN(e,t={}){return Dh(e,Ws.getMap().classNameMap,t,"regularizer")}function _t(e){if(e==null)return null;if(typeof e=="string"){const t=e in TN?TN[e]:e,n={className:t,config:{}};return AN(n)}else return e instanceof xN?e:AN(e)}class Yw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null&&(this.maxValue=e.maxValue)}call(e,t){e=Xe(e);let n=Fi(e);return this.maxValue!=null&&(n=jn(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}Yw.className="ReLU",ge(Yw);class qw extends lt{constructor(e){super(e==null?{}:e);this.DEFAULT_ALPHA=.3,e==null&&(e={}),this.alpha=e.alpha==null?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=Xe(e);return Xd(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}qw.className="LeakyReLU",ge(qw);class jw extends lt{constructor(e){super(e==null?{}:e);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",e==null&&(e={}),this.supportsMasking=!0,this.alphaInitializer=Ft(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=_t(e.alphaRegularizer),this.alphaConstraint=hn(e.alphaConstraint),e.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else if(typeof e.sharedAxes=="number")this.sharedAxes=[e.sharedAxes];else throw new j(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`)}build(e){e=It(e);const t=e.slice(1);if(this.sharedAxes!=null)for(const s of this.sharedAxes)t[s-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(this.sharedAxes!=null)for(let s=1;s(Gt(t),t==="channelsFirst"?Pe(e,[0,2,3,1]):e))}function vN(e,t){return ee(()=>(Gt(t),t==="channelsFirst"?Pe(e,[0,2,3,4,1]):e))}function NN(e,t,n,s=1,i="valid",o,a=1){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.shape.length!==3)throw new j(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(t.shape.length!==3)throw new j(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(n!=null&&n.shape.length!==1)throw new j(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(o==="channelsFirst"&&(e=Pe(e,[0,2,1])),i==="causal")throw new Ge("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let c=Hd(e,t,s,i==="same"?"same":"valid","NWC",a);return n!=null&&(c=$i(c,n)),c})}function vee(e,t,n=1,s="valid",i,o=1){return ee(()=>(Gt(i),NN(e,t,null,n,s,i,o)))}function Nee(e,t,n=[1,1],s="valid",i,o){return ee(()=>(Gt(i),Qw(e,t,null,n,s,i,o)))}function Qw(e,t,n,s=[1,1],i="valid",o,a,c=null){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.rank!==3&&e.rank!==4)throw new j(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(t.rank!==3&&t.rank!==4)throw new j(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let u=Zw(e,o);if(i==="causal")throw new Ge("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return u=Hb({x:u,filter:t,strides:s,pad:i==="same"?"same":"valid",dilations:a,dataFormat:"NHWC",bias:n,activation:c}),o==="channelsFirst"&&(u=Pe(u,[0,3,1,2])),u})}function Cee(e,t,n=[1,1,1],s="valid",i,o){return 
ee(()=>(Gt(i),CN(e,t,null,n,s,i,o)))}function CN(e,t,n,s=[1,1,1],i="valid",o,a){return ee(()=>{if(o==null&&(o=ri()),Gt(o),e.rank!==4&&e.rank!==5)throw new j(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(t.rank!==4&&t.rank!==5)throw new j(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let c=vN(e,o);if(i==="causal")throw new Ge("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return c=yb(c,t,s,i==="same"?"same":"valid","NDHWC",a),n!=null&&(c=$i(c,n)),o==="channelsFirst"&&(c=Pe(c,[0,4,1,2,3])),c})}class eL extends lt{constructor(e,t){super(t);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",eL.verifyArgs(t),this.rank=e,mn(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new Ge(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=ac(t.kernelSize,e,"kernelSize"),this.strides=ac(t.strides==null?1:t.strides,e,"strides"),this.padding=t.padding==null?"valid":t.padding,Ns(this.padding),this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,Gt(this.dataFormat),this.activation=Jr(t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.biasInitializer=Ft(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=hn(t.biasConstraint),this.biasRegularizer=_t(t.biasRegularizer),this.activityRegularizer=_t(t.activityRegularizer),this.dilationRate=ac(t.dilationRate==null?1:t.dilationRate,e,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new j(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new j(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new j(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(e){if(vs("kernelSize"in e,"required key 'kernelSize' not in config"),typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,3))throw new j(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:Xr(this.activation),useBias:this.useBias,biasInitializer:Vt(this.biasInitializer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),biasConstraint:ln(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Vh extends eL{constructor(e,t){super(e,t);this.kernel=null,Vh.verifyArgs(t),this.filters=t.filters,mn(this.filters,"filters"),this.kernelInitializer=Ft(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=hn(t.kernelConstraint),this.kernelRegularizer=_t(t.kernelRegularizer)}build(e){e=It(e);const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j(`The channel dimension of the input should be 
defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return ee(()=>{e=Xe(e);let n;const s=this.bias==null?null:this.bias.read(),i=bv(this.activation.getClassName());if(i!=null&&this.rank===2)n=Qw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate,i);else{if(this.rank===1)n=NN(e,this.kernel.read(),s,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)n=Qw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)n=CN(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new Ge("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(n=this.activation.apply(n))}return n})}computeOutputShape(e){e=It(e);const t=[],n=this.dataFormat==="channelsLast"?e.slice(1,e.length-1):e.slice(2);for(let i=0;i 0 but got ${JSON.stringify(e.filters)}`)}}class Hh extends Vh{constructor(e){super(2,e);Hh.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,2))throw new j(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Hh.className="Conv2D",ge(Hh);class Zp extends Vh{constructor(e){super(3,e);Zp.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!(Array.isArray(e.kernelSize)&&(e.kernelSize.length===1||e.kernelSize.length===3)))throw new j(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}Zp.className="Conv3D",ge(Zp);class tL extends Hh{constructor(e){super(e);if(this.inputSpec=[new fn({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new j(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(e=It(e),e.length!==4)throw new j("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new fn({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return ee(()=>{let n=Xe(e);if(n.shape.length!==4)throw new j(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);const s=n.shape,i=s[0];let o,a;this.dataFormat==="channelsFirst"?(o=2,a=3):(o=1,a=2);const c=s[o],u=s[a],p=this.kernelSize[0],m=this.kernelSize[1],y=this.strides[0],b=this.strides[1],w=Jp(c,y,p,this.padding),I=Jp(u,b,m,this.padding),T=[i,w,I,this.filters];this.dataFormat!=="channelsLast"&&(n=Pe(n,[0,2,3,1]));let v=Yd(n,this.kernel.read(),T,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(v=Pe(v,[0,3,1,2])),this.bias!=null&&(v=$i(v,this.bias.read(),this.dataFormat)),this.activation!=null&&(v=this.activation.apply(v)),v})}computeOutputShape(e){e=It(e);const t=e.slice();let n,s,i;this.dataFormat==="channelsFirst"?(n=1,s=2,i=3):(n=3,s=1,i=2);const o=this.kernelSize[0],a=this.kernelSize[1],c=this.strides[0],u=this.strides[1];return t[n]=this.filters,t[s]=Jp(t[s],c,o,this.padding),t[i]=Jp(t[i],u,a,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}tL.className="Conv2DTranspose",ge(tL);class RN extends Vh{constructor(e,t){super(e,t);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,t.filters==null)throw new j("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(t.kernelInitializer!=null||t.kernelRegularizer!=null||t.kernelConstraint!=null)throw new j("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(t.padding!=null&&t.padding!=="same"&&t.padding!=="valid")throw new j(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=_t(t.depthwiseRegularizer),this.depthwiseConstraint=hn(t.depthwiseConstraint),this.pointwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=_t(t.pointwiseRegularizer),this.pointwiseConstraint=hn(t.pointwiseConstraint)}build(e){if(e=It(e),e.length{e=Xe(e);let n;if(this.rank===1)throw new Ge("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(e=Pe(e,[0,2,3,1])),n=Fb(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(n=$i(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),this.dataFormat==="channelsFirst"&&(n=Pe(n,[0,3,1,2])),n})}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.pointwiseInitializer=Vt(this.pointwiseInitializer),e.depthwiseRegularizer=xt(this.depthwiseRegularizer),e.pointwiseRegularizer=xt(this.pointwiseRegularizer),e.depthwiseConstraint=ln(this.depthwiseConstraint),e.pointwiseConstraint=ln(this.pointwiseConstraint),e}}RN.className="SeparableConv";class nL extends RN{constructor(e){super(2,e)}}nL.className="SeparableConv2D",ge(nL);class Qp extends Vh{constructor(e){super(1,e);Qp.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!hw(e.kernelSize,"number",1,1))throw new j(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}Qp.className="Conv1D",ge(Qp);class sL extends lt{constructor(e){super(e);typeof e.cropping=="number"?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:typeof e.cropping[0]=="number"?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=e.dataFormat===void 0?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return this.dataFormat==="channelsFirst"?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return ee(()=>{if(e=Xe(e),this.dataFormat==="channelsLast"){const n=Cp(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return Cp(n,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{const n=Cp(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return Cp(n,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}sL.className="Cropping2D",ge(sL);class iL extends 
lt{constructor(e){super(e);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=e.size==null?this.DEFAULT_SIZE:e.size,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat}computeOutputShape(e){if(this.dataFormat==="channelsFirst"){const t=e[2]==null?null:this.size[0]*e[2],n=e[3]==null?null:this.size[1]*e[3];return[e[0],e[1],t,n]}else{const t=e[1]==null?null:this.size[0]*e[1],n=e[2]==null?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return ee(()=>{let n=Xe(e);const s=n.shape;if(this.dataFormat==="channelsFirst"){n=Pe(n,[0,2,3,1]);const i=this.size[0]*s[2],o=this.size[1]*s[3],a=n.resizeNearestNeighbor([i,o]);return Pe(a,[0,3,1,2])}else{const i=this.size[0]*s[1],o=this.size[1]*s[2];return n.resizeNearestNeighbor([i,o])}})}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}iL.className="UpSampling2D",ge(iL);function aG(e,t,n=[1,1],s="valid",i,o){return ee(()=>{i==null&&(i=ri()),Gt(i);let a=Zw(e,i);if(e.rank!==4)throw new j(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(t.rank!==4)throw new j(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return a=Oo(a,t,n,s==="same"?"same":"valid","NHWC",o),i==="channelsFirst"&&(a=Pe(a,[0,3,1,2])),a})}class rL extends eL{constructor(e){super(2,e);this.depthwiseKernel=null,this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=Ft(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=hn(e.depthwiseConstraint),this.depthwiseRegularizer=_t(e.depthwiseRegularizer)}build(e){if(e=It(e),e.length<4)throw new j(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(e)}.`);const t=this.dataFormat==="channelsFirst"?1:3;if(e[t]==null||e[t]<0)throw new j(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{e=Xe(e);let n=aG(e,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(n=$i(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),n})}computeOutputShape(e){e=It(e);const t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,i=ui(t,this.kernelSize[0],this.padding,this.strides[0]),o=ui(n,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[e[0],s,i,o]:[e[0],i,o,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.depthwiseRegularizer=xt(this.depthwiseRegularizer),e.depthwiseConstraint=ln(this.depthwiseRegularizer),e}}rL.className="DepthwiseConv2D",ge(rL);function ON(e,t,n,s){if(Array.isArray(e)){if(t!=null||n!=null)throw new j("When inputs is an array, neither initialState or constants should be provided");s!=null&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function i(o){return 
o==null||Array.isArray(o)?o:[o]}return t=i(t),n=i(n),{inputs:e,initialState:t,constants:n}}function EN(e,t,n,s=!1,i,o,a=!1,c=!1){return ee(()=>{const u=t.shape.length;if(u<3)throw new j(`Input should be at least 3D, but is ${u}D.`);const p=[1,0].concat(ai(2,u));if(t=Pe(t,p),o!=null)throw new Ge("The rnn() functoin of the deeplearn.js backend does not support constants yet.");a&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),i!=null&&(i=i.asType("bool").asType("float32"),i.rank===u-1&&(i=Kn(i,-1)),i=Pe(i,p)),s&&(t=As(t,0),i!=null&&(i=As(i,0)));const m=[];let y,b=n;const w=t.shape[0],I=_i(t);let T;i!=null&&(T=_i(i));for(let N=0;Ne(E,b));if(i==null)y=D[0],b=D[1];else{const F=ee(()=>{const _=T[N],B=Dn(_).sub(_),U=D[0].mul(_).add(b[0].mul(B)),Y=b.map((q,J)=>D[1][J].mul(_).add(q.mul(B)));return{output:U,newStates:Y}});y=F.output,b=F.newStates}c&&m.push(y)}let v;if(c){const N=1;v=as(m,N)}return[y,v,b]})}class Bi extends lt{constructor(e){super(e);let t;if(e.cell==null)throw new j("cell property is missing for the constructor of RNN.");if(Array.isArray(e.cell)?t=new nm({cells:e.cell}):t=e.cell,t.stateSize==null)throw new j("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=t,this.returnSequences=e.returnSequences==null?!1:e.returnSequences,this.returnState=e.returnState==null?!1:e.returnState,this.goBackwards=e.goBackwards==null?!1:e.goBackwards,this._stateful=e.stateful==null?!1:e.stateful,this.unroll=e.unroll==null?!1:e.unroll,this.supportsMasking=!0,this.inputSpec=[new fn({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return ai(0,e).map(t=>null)}else return this.states_}setStates(e){this.states_=e}computeOutputShape(e){Nw(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(this.returnSequences?s=[e[0],e[1],n]:s=[e[0],n],this.returnState){const i=[];for(const o of t)i.push([e[0],o]);return[s].concat(i)}else return s}computeMask(e,t){return ee(()=>{Array.isArray(t)&&(t=t[0]);const n=this.returnSequences?t:null;if(this.returnState){const s=this.states.map(i=>null);return[n].concat(s)}else return n})}get states(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;na.shape[a.shape.length-1]),o))throw new j(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=o.map(a=>new fn({shape:[null,a]}));this.stateful&&this.resetStates()}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new rr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(n==null)throw new j("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_=[ct([n,this.cell.stateSize])];else if(e==null)qe(this.states_),this.keptStates!=null&&(qe(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_[0]=ct([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t===!0?this.keptStates.push(this.states_.slice()):qe(this.states_);for(let s=0;sRn(s.clone()))})}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=ON(e,n,s,this.numConstants);e=i.inputs,n=i.initialState,s=i.constants;let o=[],a=[];if(n!=null){t.initialState=n,o=o.concat(n),this.stateSpec=[];for(const u of n)this.stateSpec.push(new fn({shape:u.shape}));a=a.concat(this.stateSpec)}s!=null&&(t.constants=s,o=o.concat(s),this.numConstants=s.length);const c=o[0]instanceof li;if(c){const u=[e].concat(o),p=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=p;const y=super.apply(u,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t==null?null:t.mask,s=t==null?null:t.training;let i=t==null?null:t.initialState;e=Xe(e),i==null&&(this.stateful?i=this.states_:i=this.getInitialState(e));const o=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(i.length!==o)throw new j(`RNN Layer has ${o} state(s) but was passed ${i.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const a={training:s},c=(w,I)=>{const T=this.cell.call([w].concat(I),a);return[T[0],T.slice(1)]},u=EN(c,e,i,this.goBackwards,n,null,this.unroll,this.returnSequences),p=u[0],m=u[1],y=u[2];this.stateful&&this.resetStates(y,s);const b=this.returnSequences?m:p;return this.returnState?[b].concat(y):b})}getInitialState(e){return ee(()=>{let t=ct(e.shape);return t=Ue(t,[1,2]),t=Wh(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(n=>n>1?bw(t,[1,n]):t):this.cell.stateSize>1?[bw(t,[1,this.cell.stateSize])]:[t]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Bi.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=t.cell,i=hi(s,n);return new e(Object.assign(t,{cell:i}))}}Bi.className="RNN",ge(Bi);class cc extends lt{}class em extends 
cc{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation==null?this.DEFAULT_ACTIVATION:e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=hn(e.kernelConstraint),this.recurrentConstraint=hn(e.recurrentConstraint),this.biasConstraint=hn(e.biasConstraint),this.dropout=nc([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=nc([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=It(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=t.training==null?!1:t.training;0Dn(e),rate:this.dropout,training:s})),0Dn(n),rate:this.recurrentDropout,training:s}));let i;const o=this.dropoutMask,a=this.recurrentDropoutMask;o!=null?i=Wi(X(e,o),this.kernel.read()):i=Wi(e,this.kernel.read()),this.bias!=null&&(i=$i(i,this.bias.read())),a!=null&&(n=X(n,a));let c=be(i,Wi(n,this.recurrentKernel.read()));return this.activation!=null&&(c=this.activation.apply(c)),[c,c]})}getConfig(){const e=super.getConfig(),t={units:this.units,activation:Xr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),recurrentInitializer:Vt(this.recurrentInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:xt(this.kernelRegularizer),recurrentRegularizer:xt(this.recurrentRegularizer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),kernelConstraint:ln(this.kernelConstraint),recurrentConstraint:ln(this.recurrentConstraint),biasConstraint:ln(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}em.className="SimpleRNNCell",ge(em);class oL extends Bi{constructor(e){e.cell=new em(e),super(e)}call(e,t){return ee(()=>{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return new e(t)}}oL.className="SimpleRNN",ge(oL);class tm extends 
cc{constructor(e){super(e);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new j("GRUCell does not support reset_after parameter set to true.");this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Jr(e.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=hn(e.kernelConstraint),this.recurrentConstraint=hn(e.recurrentConstraint),this.biasConstraint=hn(e.biasConstraint),this.dropout=nc([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=nc([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=It(e);const t=e[e.length-1];this.kernel=this.addWeight("kernel",[t,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training==null?!1:t.training;let s=e[1];e=e[0],0Dn(e),rate:this.dropout,training:n,count:3})),0Dn(s),rate:this.recurrentDropout,training:n,count:3}));const i=this.dropoutMask,o=this.recurrentDropoutMask;let a,c,u;0{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}aL.className="GRU",ge(aL);class Yh extends cc{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Jr(e.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=hn(e.kernelConstraint),this.recurrentConstraint=hn(e.recurrentConstraint),this.biasConstraint=hn(e.biasConstraint),this.dropout=nc([1,qr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=nc([1,qr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;e=It(e);const n=e[e.length-1];this.kernel=this.addWeight("kernel",[n,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let s;if(this.useBias){if(this.unitForgetBias){const i=this.biasInitializer,o=this.units;s=new(t=class extends Ps{apply(c,u){const p=i.apply([o]),m=new Op().apply([o]),y=i.apply([o*2]);return Nv(Nv(p,m),y)}},t.className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,s,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training;if(e=e,e.length!==3)throw new j(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const i=e[2];e=e[0],0Dn(e),rate:this.dropout,training:n,count:4})),0Dn(s),rate:this.recurrentDropout,training:n,count:4}));const o=this.dropoutMask,a=this.recurrentDropoutMask;let c,u,p,m;0{this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}cL.className="LSTM",ge(cL);class nm extends cc{constructor(e){super(e);this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return ee(()=>{e=e;let n=e.slice(1);const s=[];for(const a of this.cells.slice().reverse())Array.isArray(a.stateSize)?s.push(n.splice(0,a.stateSize.length)):s.push(n.splice(0,1));s.reverse();const i=[];let o;for(let a=0;a{Go(`RNNCell_${s}`,()=>{n.build(e),Array.isArray(n.stateSize)?t=n.stateSize[0]:t=n.stateSize,e=[e[0],t]})}),this.built=!0}getConfig(){const e=super.getConfig(),t=i=>({className:i.getClassName(),config:i.getConfig()}),n=this.cells.map(t),s={cells:n};return Object.assign({},e,s)}static fromConfig(e,t,n={}){const s=[];for(const i of t.cells)s.push(hi(i,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const 
t=[];for(const n of this.cells)t.push(...n.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return Cw(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,i=e.splice(s);for(let o=0;oRv(t(),n),a=()=>Uh(o,t,s);if(!i||i<=1)return Rn(a().clone());const c=Array(i).fill(void 0).map(a);return c.map(u=>Rn(u.clone()))}var cG=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(e!=null&&typeof Object.getOwnPropertySymbols=="function")for(var i=0,s=Object.getOwnPropertySymbols(e);i{if(this.cell.dropoutMask!=null&&(qe(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(qe(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new j("ConvRNN2D cell does not support constants");const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return ee(()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=ct(i);return Array.isArray(t)?Array(t.length).fill(o):[o]})}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new rr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=n[0];if(o==null)throw new j("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_=[ct(i)];else if(e==null)qe(this.states_),this.keptStates!=null&&(qe(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_[0]=ct(i);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):qe(this.states_);for(let a=0;aRn(a.clone()))})}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:i,strides:o,dilationRate:a}=this.cell,c=t==="channelsFirst",u=e[c?3:2],p=e[c?4:3],m=ui(u,s[0],i,o[0],a[0]),y=ui(p,s[1],i,o[1],a[1]),b=[...e.slice(0,2),...c?[n,m,y]:[m,y,n]];return b}}DN.className="ConvRNN2D";class sm extends Yh{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:i,dataFormat:o,dilationRate:a}=e;super(Object.assign({},e,{units:t}));this.filters=t,mn(this.filters,"filters"),this.kernelSize=ac(n,2,"kernelSize"),this.kernelSize.forEach(c=>mn(c,"kernelSize")),this.strides=ac(s||1,2,"strides"),this.strides.forEach(c=>mn(c,"strides")),this.padding=i||"valid",Ns(this.padding),this.dataFormat=o||"channelsLast",Gt(this.dataFormat),this.dilationRate=ac(a||1,2,"dilationRate"),this.dilationRate.forEach(c=>mn(c,"dilationRate"))}build(e){var t;e=It(e);const n=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[n]==null)throw new j(`The channel dimension of the input should be defined. Found ${e[n]}`);const s=e[n],i=4,o=this.kernelSize.concat([s,this.filters*i]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const a=this.kernelSize.concat([this.filters,this.filters*i]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",a,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let c;if(this.unitForgetBias){const u=this.biasInitializer,p=this.filters;c=new(t=class extends Ps{apply(y,b){const w=u.apply([p]),I=si([p]),T=u.apply([p*2]);return yw([w,I,T])}},t.className="CustomInit",t)}else c=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*i],null,c,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return ee(()=>{if(e.length!==3)throw new j(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],i=e[1],o=e[2],a=4;0Dn(s),rate:this.dropout,training:n,count:a}));const c=this.dropoutMask,u=(Ie,Se,Ee)=>!Se||!Se[Ee]?Ie:X(Se[Ee],Ie);let p=u(s,c,0),m=u(s,c,1),y=u(s,c,2),b=u(s,c,3);0Dn(i),rate:this.recurrentDropout,training:n,count:a}));const w=this.recurrentDropoutMask;let I=u(i,w,0),T=u(i,w,1),v=u(i,w,2),N=u(i,w,3);const E=3,[D,F,_,B]=os(this.kernel.read(),a,E),[U,Y,q,J]=this.useBias?os(this.bias.read(),a):[null,null,null,null];p=this.inputConv(p,D,U,this.padding),m=this.inputConv(m,F,Y,this.padding),y=this.inputConv(y,_,q,this.padding),b=this.inputConv(b,B,J,this.padding);const[oe,ce,ue,he]=os(this.recurrentKernel.read(),a,E);I=this.recurrentConv(I,oe),T=this.recurrentConv(T,ce),v=this.recurrentConv(v,ue),N=this.recurrentConv(N,he);const pe=this.recurrentActivation.apply(be(p,I)),le=this.recurrentActivation.apply(be(m,T)),ye=be(X(le,o),X(pe,this.activation.apply(be(y,v)))),me=X(this.recurrentActivation.apply(be(b,N)),this.activation.apply(ye));return[me,me,ye]})}getConfig(){const e=super.getConfig(),{units:t}=e,n=cG(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const i=er(e,t,this.strides,s||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return n?$i(i,n,this.dataFormat):i}recurrentConv(e,t){const n=1;return 
er(e,t,n,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}sm.className="ConvLSTM2DCell",ge(sm);class lL extends DN{constructor(e){const t=new sm(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}lL.className="ConvLSTM2D",ge(lL);class im extends lt{constructor(e){super(e);this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(this.noiseShape==null)return this.noiseShape;const t=e.shape,n=[];for(let s=0;s{this.invokeCallHook(e,t);const n=Xe(e);if(0Rv(n,this.rate,i,this.seed),()=>n,s);return o}return e})}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}im.className="Dropout",ge(im);class hL extends im{constructor(e){super(e);this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}hL.className="SpatialDropout1D",ge(hL);class uL extends lt{constructor(e){super(e);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.batchInputShape==null&&e.inputShape==null&&e.inputDim!=null){let t=null;e.batchSize!=null&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,mn(this.units,"units"),this.activation=Jr(e.activation),e.useBias!=null&&(this.useBias=e.useBias),this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=hn(e.kernelConstraint),this.biasConstraint=hn(e.biasConstraint),this.kernelRegularizer=_t(e.kernelRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){e=It(e);const t=e[e.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){e=It(e);const t=e.slice();return t[t.length-1]=this.units,t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=bv(this.activation.getClassName());let i;return s!=null?i=Wi(n,this.kernel.read(),s,this.bias?this.bias.read():null):(i=Wi(n,this.kernel.read()),this.bias!=null&&(i=$i(i,this.bias.read())),this.activation!=null&&(i=this.activation.apply(i))),i})}getConfig(){const e={units:this.units,activation:Xr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:xt(this.kernelRegularizer),biasRegularizer:xt(this.biasRegularizer),activityRegularizer:xt(this.activityRegularizer),kernelConstraint:ln(this.kernelConstraint),biasConstraint:ln(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}uL.className="Dense",ge(uL);class dL extends lt{constructor(e){e=e||{},super(e),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=It(e);for(const t of e.slice(1))if(t==null)throw new j(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],Yr(e,1)]}call(e,t){return ee(()=>{this.invokeCallHook(e,t);let n=Xe(e);if(this.dataFormat==="channelsFirst"&&n.rank>1){const s=[0];for(let i=2;i{this.invokeCallHook(e,t);const n=Xe(e);return this.activation.apply(n)})}getConfig(){const e={activation:Xr(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}pL.className="Activation",ge(pL);class mL extends lt{constructor(e){super(e);this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return ee(()=>(e=Xe(e),Rz(e,this.n)))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}mL.className="RepeatVector",ge(mL);class fL extends lt{constructor(e){super(e);this.targetShape=e.targetShape;for(let t=0;t{this.invokeCallHook(e,t);const n=Xe(e),s=n.shape,i=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(i)})}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}fL.className="Reshape",ge(fL);class gL extends lt{constructor(e){super(e);if(e.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=ai(1,e.dims.length+1);if(!ot(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new fn({ndim:this.dims.length+1})]}computeOutputShape(e){e=It(e);const t=e.slice();return this.dims.forEach((n,s)=>{t[s+1]=e[n]}),t}call(e,t){return Pe(Xe(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}gL.className="Permute",ge(gL);class yL extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null?this.maskValue=e.maskValue==null?0:e.maskValue:this.maskValue=0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=Xe(e),s=-1;return th(Pr(n,this.maskValue),s)}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=-1,i=!0,o=th(Pr(n,this.maskValue),s,i),a=n.mul(o.asType(n.dtype));return a})}}yL.className="Masking",ge(yL);class bL extends lt{constructor(e){super(e);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",e.batchInputShape==null&&e.inputShape==null){let t=null;e.batchSize!=null&&(t=e.batchSize),e.inputLength==null?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Nt(e.inputLength))}this.inputDim=e.inputDim,mn(this.inputDim,"inputDim"),this.outputDim=e.outputDim,mn(this.outputDim,"outputDim"),this.embeddingsInitializer=Ft(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=_t(e.embeddingsRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.embeddingsConstraint=hn(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return 
ee(()=>this.maskZero?(e=Xe(e),Pr(e,et(e))):null)}computeOutputShape(e){if(e=It(e),this.inputLength==null)return[...e,this.outputDim];const t=Nt(this.inputLength);if(t.length!==e.length-1)throw new j(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s{this.invokeCallHook(e,t);let n=Xe(e);n.dtype!=="int32"&&(n=_h(n,"int32"));const s=Cv(this.embeddings.read(),n.as1D());return s.reshape(It(this.computeOutputShape(n.shape)))})}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Vt(this.embeddingsInitializer),embeddingsRegularizer:xt(this.embeddingsRegularizer),activityRegularizer:xt(this.activityRegularizer),embeddingsConstraint:ln(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}bL.className="Embedding",ge(bL);class qo extends lt{constructor(e){super(e||{});this.supportsMasking=!0}mergeFunction(e){throw new Ge}computeElementwiseOpOutputShape(e,t){if(e==null||t==null)return null;if(e.length1)throw new j(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(e)}.`);let n=e[0]==null?null:e[0].slice(1);for(let i=1;ii.length);e.indexOf(null)===-1&&Hr(s).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return ee(()=>{if(e=e,this.reshapeRequired){const n=[],s=e.map(i=>i.rank);if(s.indexOf(null)===-1){const i=qr(s);for(let o of e){const a=o.rank;for(let c=0;c1){const p=ai(1,u).concat([0]);n.push(Pe(c,p)),i=!0}else n.push(c)}let o=this.mergeFunction(n);const a=o.rank;if(i){if(a==null){const c=o.shape,u=c.length,p=c[u-1],m=[p].concat(c.slice(0,c.length-1));o=Pe(o.reshape([-1,p]),[1,0]).reshape(m)}else if(a>1){const c=[a-1].concat(ai(0,a-1));o=Pe(o,c)}}return o}}else return this.mergeFunction(e)})}computeOutputShape(e){e=e;let t;e[0]==null?t=null:t=e[0].slice(1);for(let s=1;s{if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an Array");if(!Array.isArray(e))throw new j("`inputs` should be an Array");if(t.length!==e.length)throw new j(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every(s=>s==null))return null;t=t.map(s=>s==null?s:Kn(s,0));let n=t[0];for(let s=1;s{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0];for(let n=1;n{let t=e[0];for(let n=1;n1)throw new j("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return ee(()=>yw(e,this.axis))}computeOutputShape(e){if(!(Array.isArray(e)&&Array.isArray(e[0])))throw new j("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const i of t.slice(1)){if(n[s]==null||i[s]==null){n[s]=null;break}n[s]+=i[s]}return n}computeMask(e,t){if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new j("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new j(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return ee(()=>{let n=!0;if(t.forEach(o=>{if(o!=null){n=!1;return}}),n)return null;const s=[];for(let o=0;o3||t.shape.length>3)throw new Ge("batchDot is not implemented for tensors of 4D or higher rank yet");if(k(e.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`),k(e.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof n=="number"&&(n=[n,n]),e.dtype==="complex64"||t.dtype==="complex64")throw new Ge("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,i=t.shape.length;n==null&&(n=[s-1,i-2]);const o=n;return ee(()=>{let a;if(s>i){a=s-i;const u=[];for(let p=0;ps){a=i-s;const u=[];for(let p=0;p0){let u;s>i?u=s+i-3:u=s-1;const p=[];for(let m=u;m"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new Ge("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new j(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(e.length!==2)throw new j(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t=e[0],n=e[1],s;return Array.isArray(this.axes)?s=this.axes.map((i,o)=>Qh(i,e[o].shape.length)):s=[Qh(this.axes,t.shape.length),Qh(this.axes,n.shape.length)],this.normalize&&(t=zp(t,s[0]),n=zp(n,s[1])),lG(t,n,s)}interpretAxes(e,t){let n;return Array.isArray(this.axes)?n=this.axes:n=[Qh(this.axes,e.length),Qh(this.axes,t.length)],n}computeOutputShape(e){k(Array.isArray(e)&&e.length===2&&Array.isArray(e[0])&&Array.isArray(e[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new Ge("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const i=t.concat(n);return i.length===1&&i.push(1),i}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}wL.className="Dot",ge(wL);class LL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=()=>Rp(n.shape,0,this.stddev).add(n),i=Uh(s,()=>n,t.training||!1);return i})}}LL.className="GaussianNoise",ge(LL);class SL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=Xe(e);if(this.rate>0&&this.rate<1){const 
s=()=>{const i=Math.sqrt(this.rate/(1-this.rate));return n.mul(Rp(n.shape,1,i))};return Uh(s,()=>n,t.training||!1)}return n})}}SL.className="GaussianDropout",ge(SL);class IL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||Xe(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e),s=()=>{const i=Xe(e),o=1.6732632423543772,a=1.0507009873554805,c=-o*a;let u=tr($o(n),this.rate);u=_h(u,"float32");const p=((1-this.rate)*(1+this.rate*c**2))**-.5,m=-p*c*this.rate,y=i.mul(u).add(u.add(-1).mul(c));return y.mul(p).add(m)};return Uh(s,()=>Xe(e),t.training||!1)}return e})}}IL.className="AlphaDropout",ge(IL);function eu(e,t,n,s,i,o=.001){let a;if(e.rank===2)a=nA(e,t,n,s,i,o);else if(e.rank===3)a=sA(e,t,n,s,i,o);else if(e.rank===4)a=iA(e,t,n,s,i,o);else throw new Ge(`batchNormalization is not implemented for array of rank ${e.rank} yet`);return a}function hG(e,t,n,s,i=.001){return ee(()=>{const o=np(e,s),a=o.mean,c=o.variance,u=eu(e,a,c,n,t,i);return[u,a,c]})}function uG(e,t,n,s,i=.001){return ee(()=>{const o=np(e,s),a=o.mean,c=o.variance,u=[];for(const I of ai(0,e.rank))s.indexOf(I)!==-1?u.push(1):u.push(e.shape[I]);const p=a.reshape(u),m=c.reshape(u),y=t==null?null:t.reshape(u),b=n==null?null:n.reshape(u),w=eu(e,p,m,b,y,i);return[w,a,c]})}function dG(e,t,n,s,i=.001){return ot(s.slice().sort(),ai(0,e.rank-1))?hG(e,t,n,s,i):uG(e,t,n,s,i)}class xL extends lt{constructor(e){e==null&&(e={}),super(e),this.supportsMasking=!0,this.axis=e.axis==null?-1:e.axis,this.momentum=e.momentum==null?.99:e.momentum,this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.movingMeanInitializer=Ft(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=Ft(e.movingVarianceInitializer||"ones"),this.betaConstraint=hn(e.betaConstraint),this.gammaConstraint=hn(e.gammaConstraint),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer)}build(e){e=It(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(n==null)throw new j(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new fn({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training,s=Xe(e),i=s.shape,o=i.length,a=ai(0,o),c=this.axis>=0?this.axis:this.axis+o;a.splice(c,1);const u=Po(1,o);u[c]=i[c];const p=a.slice();p.sort();const m=!ot(p,ai(0,o).slice(0,o-1)),y=()=>{if(m){const N=this.movingMean.read().reshape(u),E=this.movingVariance.read().reshape(u),D=this.center?this.beta.read().reshape(u):null,F=this.scale?this.gamma.read().reshape(u):null;return eu(s,N,E,D,F,this.epsilon)}else return 
eu(s,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!n)return y();const[b,w,I]=dG(s,this.gamma.read(),this.beta.read(),a,this.epsilon),T=(N,E,D)=>{ee(()=>{const F=1-D,_=N.read(),B=_.sub(E).mul(F);N.write(_.sub(B))})},v=()=>{T(this.movingMean,w,this.momentum),T(this.movingVariance,I,this.momentum)};return v(),b})}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Vt(this.betaInitializer),gammaInitializer:Vt(this.gammaInitializer),movingMeanInitializer:Vt(this.movingMeanInitializer),movingVarianceInitializer:Vt(this.movingVarianceInitializer),betaRegularizer:xt(this.betaRegularizer),gammaRegularizer:xt(this.gammaRegularizer),betaConstraint:ln(this.betaConstraint),gammaConstraint:ln(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}xL.className="BatchNormalization",ge(xL);class TL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.axis=e.axis==null?-1:e.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer),this.supportsMasking=!0}build(e){e=It(e);const t=e.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let i=0;i=t)throw new Error(`Invalid axis: ${i}`);if(this.axis.length!==Hr(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map(i=>e[i]),s=!0;this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,s):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,s):this.beta=null,this.built=!0}call(e,t){const n=Xe(e),s=n.shape,i=s.length;return ee(()=>{const o=!0;let{mean:a,variance:c}=np(n,this.axis,o);const u=Po(1,i);for(const I of this.axis)u[I]=s[I];const p=I=>I!=null&&I.shape.length!==i&&this.axis!==[i-1]?I.reshape(u):I;let m=p(this.gamma.read()),y=p(this.beta.read());const b=[],w=[];for(let I=0;I{if(e.rank!==3)throw new j(`temporalPadding expects input tensor to be 3-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new j(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);const n=[[0,0],t,[0,0]];return ki(e,n)})}function pG(e,t,n){return ee(()=>{if(e.rank!==4)throw new j(`temporalPadding expects input tensor to be 4-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new j("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(n==null&&(n=ri()),n!=="channelsLast"&&n!=="channelsFirst")throw new j(`Unknown data format: ${n}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let s;return n==="channelsFirst"?s=[[0,0],[0,0],t[0],t[1]]:s=[[0,0],t[0],t[1],[0,0]],ki(e,s)})}class AL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.dataFormat=e.dataFormat==null?ri():e.dataFormat,e.padding==null)this.padding=[[1,1],[1,1]];else if(typeof e.padding=="number")this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,e.padding.length!==2)throw new j(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if(typeof e.padding[0]=="number")t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,e.padding[0].length!==2)throw new j(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],e.padding[1].length!==2)throw new j(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){e=It(e);let t,n;return this.dataFormat==="channelsFirst"?(e[2]!=null&&e[2]>=0?t=e[2]+this.padding[0][0]+this.padding[0][1]:t=null,e[3]!=null&&e[3]>=0?n=e[3]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],e[1],t,n]):(e[1]!=null&&e[1]>=0?t=e[1]+this.padding[0][0]+this.padding[0][1]:t=null,e[2]!=null&&e[2]>=0?n=e[2]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],t,n,e[3]])}call(e,t){return ee(()=>pG(Xe(e),this.padding,this.dataFormat))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}AL.className="ZeroPadding2D",ge(AL);function rm(e,t,n,s,i,o){return ee(()=>{Gt(i),Sv(o),Ns(s),n==null&&(n=[1,1]),s==null&&(s="valid"),i==null&&(i=ri()),o==null&&(o="max"),e=Zw(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=mh(e,t,n,c):a=oh(e,t,n,c),i==="channelsFirst"&&(a=Pe(a,[0,3,1,2])),a})}function kN(e,t,n,s,i,o){return ee(()=>{Gt(i),Sv(o),Ns(s),n==null&&(n=[1,1,1]),s==null&&(s="valid"),i==null&&(i=ri()),o==null&&(o="max"),e=vN(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=Nb(e,t,n,c):a=pb(e,t,n,c),i==="channelsFirst"&&(a=Pe(a,[0,4,1,2,3])),a})}class FN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=2),super(e),typeof e.poolSize=="number")this.poolSize=[e.poolSize];else if(Array.isArray(e.poolSize)&&e.poolSize.length===1&&typeof e.poolSize[0]=="number")this.poolSize=e.poolSize;else throw new j(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);if(mn(this.poolSize,"poolSize"),e.strides==null)this.strides=this.poolSize;else if(typeof e.strides=="number")this.strides=[e.strides];else if(Array.isArray(e.strides)&&e.strides.length===1&&typeof e.strides[0]=="number")this.strides=e.strides;else throw new j(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,Ns(this.padding),this.inputSpec=[new fn({ndim:3})]}computeOutputShape(e){e=It(e);const t=ui(e[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return ee(()=>{this.invokeCallHook(e,t),e=Wh(Xe(e),2);const n=this.poolingFunction(Xe(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return zr(n,[2])})}getConfig(){const 
e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class vL extends FN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"max")}}vL.className="MaxPooling1D",ge(vL);class NL extends FN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"avg")}}NL.className="AveragePooling1D",ge(NL);class _N extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==2)throw new j(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];mn(this.poolSize,"poolSize"),mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ns(this.padding),this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){e=It(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2];return t=ui(t,this.poolSize[0],this.padding,this.strides[0]),n=ui(n,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class CL extends _N{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"max")}}CL.className="MaxPooling2D",ge(CL);class RL extends _N{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),rm(e,t,n,s,i,"avg")}}RL.className="AveragePooling2D",ge(RL);class WN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==3)throw new j(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];mn(this.poolSize,"poolSize"),mn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ns(this.padding),this.inputSpec=[new fn({ndim:5})]}computeOutputShape(e){e=It(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[4]:e[3];return t=ui(t,this.poolSize[0],this.padding,this.strides[0]),n=ui(n,this.poolSize[1],this.padding,this.strides[1]),s=ui(s,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class OL extends WN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return 
Gt(i),Ns(s),kN(e,t,n,s,i,"max")}}OL.className="MaxPooling3D",ge(OL);class EL extends WN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ns(s),kN(e,t,n,s,i,"avg")}}EL.className="AveragePooling3D",ge(EL);class $N extends lt{constructor(e){super(e);this.inputSpec=[new fn({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new Ge}}class DL extends $N{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=Xe(e);return zt(n,1)})}}DL.className="GlobalAveragePooling1D",ge(DL);class kL extends $N{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=Xe(e);return Xn(n,1)})}}kL.className="GlobalMaxPooling1D",ge(kL);class UN extends lt{constructor(e){super(e);this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),this.inputSpec=[new fn({ndim:4})]}computeOutputShape(e){return e=e,this.dataFormat==="channelsLast"?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new Ge}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class FL extends UN{call(e,t){return ee(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?zt(n,[1,2]):zt(n,[2,3])})}}FL.className="GlobalAveragePooling2D",ge(FL);class _L extends UN{call(e,t){return ee(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?Xn(n,[1,2]):Xn(n,[2,3])})}}_L.className="GlobalMaxPooling2D",ge(_L);class BN extends lt{constructor(e){super(e);this.layer=e.layer}build(e){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(e){this.layer!=null&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=t.layer,i=hi(s,n);delete t.layer;const o={layer:i};return Object.assign(o,t),new e(o)}}class WL extends BN{constructor(e){super(e);this.supportsMasking=!0}build(e){if(e=It(e),e.length<3)throw new j(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){e=It(e);const t=[e[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return ee(()=>{e=Xe(e);const n=(o,a)=>{const c=Xe(this.layer.call(o,t));return[c,[]]},s=EN(n,e,[],!1,null,null,!1,!0),i=s[1];return i})}}WL.className="TimeDistributed",ge(WL);function mG(e){ec(xz,"BidirectionalMergeMode",e)}const fG="concat";class $L extends BN{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=hi(n),t.goBackwards=!(t.goBackwards===!0);const s={};if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=hi(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=e.mergeMode===void 0?fG:e.mergeMode,mG(this.mergeMode),e.weights)throw new Ge("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,this.forwardLayer!=null&&(this.forwardLayer.trainable=e),this.backwardLayer!=null&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t=this.forwardLayer.computeOutputShape(e);Array.isArray(t)&&Array.isArray(t[0])||(t=[t]),t=t;let n,s,i;return this.returnState&&(i=t.slice(1)),n=t[0],n=n,this.mergeMode==="concat"?(n[n.length-1]*=2,s=[n]):this.mergeMode==null?s=[n,n.slice()]:s=[n],this.returnState?this.mergeMode==null?s.concat(i).concat(i.slice()):[n].concat(i).concat(i.slice()):Jn(s)}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=ON(e,n,s,this.numConstants);if(e=i.inputs,n=i.initialState,s=i.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(n==null||n.length===0)&&s==null)return super.apply(e,t);const o=[],a=[];if(n!=null){const u=n.length;if(u%2>0)throw new j("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,o.push(...n);const p=n.map(m=>new fn({shape:m.shape}));this.forwardLayer.stateSpec=p.slice(0,u/2),this.backwardLayer.stateSpec=p.slice(u/2),a.push(...p)}if(s!=null)throw new Ge("Support for constants in Bidirectional layers is not implemented yet.");const c=o[0]instanceof li;for(const u of o)if(u instanceof li!==c)throw new j("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(c){const u=[e].concat(o),p=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=p;const y=super.apply(u,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t.initialState;let s,i;if(n==null)s=this.forwardLayer.call(e,t),i=this.backwardLayer.call(e,t);else{const c=n.slice(0,n.length/2),u=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:c})),i=this.backwardLayer.call(e,Object.assign(t,{initialState:u}))}let o;this.returnState&&(Array.isArray(s)&&(o=s.slice(1).concat(i.slice(1))),s=s[0],i=i[0]),this.returnSequences&&(i=As(i,1));let a;return this.mergeMode==="concat"?a=yw([s,i]):this.mergeMode==="sum"?a=be(s,i):this.mergeMode==="ave"?a=X(.5,be(s,i)):this.mergeMode==="mul"?a=X(s,i):this.mergeMode==null&&(a=[s,i]),this.returnState?this.mergeMode==null?a.concat(o):[a].concat(o):a})}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){Go(this.forwardLayer.name,()=>{this.forwardLayer.build(e)}),Go(this.backwardLayer.name,()=>{this.backwardLayer.build(e)}),this.built=!0}computeMask(e,t){Array.isArray(t)&&(t=t[0]);let n;if(this.returnSequences?this.mergeMode==null?n=[t,t]:n=t:this.mergeMode==null?n=[null,null]:n=null,this.returnState){const s=this.forwardLayer.states,i=s.map(o=>null);return Array.isArray(n)?n.concat(i).concat(i):[n].concat(i).concat(i)}else return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(e),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=hi(t.layer);if(delete t.layer,t.numConstants!=null)throw new Ge("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new e(s)}}$L.className="Bidirectional",ge($L);function gG(e){return new sc(e)}function yG(e){return new Kw(e)}function bG(e){return new Yw(e)}function wG(e){return new qw(e)}function LG(e){return new jw(e)}function SG(e){return new Jw(e)}function IG(e){return new Xw(e)}function xG(e){return new Qp(e)}function TG(e){return new Hh(e)}function AG(e){return new tL(e)}function vG(e){return new Zp(e)}function NG(e){return new nL(e)}function CG(e){return new sL(e)}function RG(e){return new iL(e)}function OG(e){return new rL(e)}function EG(e){return new pL(e)}function DG(e){return new uL(e)}function kG(e){return new im(e)}function FG(e){return new hL(e)}function _G(e){return new dL(e)}function WG(e){return new mL(e)}function $G(e){return new fL(e)}function UG(e){return new gL(e)}function BG(e){return new bL(e)}function MG(e){return new qh(e)}function PG(e){return new Kh(e)}function zG(e){return new Zh(e)}function GG(e){return new Xh(e)}function VG(e){return new Jh(e)}function HG(e){return new jh(e)}function YG(e){return new wL(e)}function qG(e){return new xL(e)}function jG(e){return new TL(e)}function KG(e){return new AL(e)}function UL(e){return new NL(e)}function XG(e){return UL(e)}function JG(e){return UL(e)}function BL(e){return new RL(e)}function ZG(e){return BL(e)}function QG(e){return BL(e)}function ML(e){return new EL(e)}function eV(e){return ML(e)}function tV(e){return ML(e)}function nV(e){return new DL(e)}function sV(e){return new FL(e)}function MN(e){return new kL(e)}function PN(e){return new _L(e)}function zN(e){return new vL(e)}function GN(e){return new CL(e)}function iV(e){return new OL(e)}function rV(e){return new aL(e)}function oV(e){return new tm(e)}function aV(e){return new cL(e)}function cV(e){return new Yh(e)}function lV(e){return new oL(e)}function hV(e){return new em(e)}function uV(e){return new lL(e)}function dV(e){return new sm(e)}function pV(e){return new Bi(e)}function mV(e){return new nm(e)}function fV(e){return new $L(e)}function gV(e){return new WL(e)}const yV=MN,bV=PN,wV=zN,LV=GN;function SV(e){return new LL(e)}function IV(e){return new SL(e)}function xV(e){return new IL(e)}function TV(e){return new yL(e)}var 
AV=Object.freeze({__proto__:null,inputLayer:gG,elu:yG,reLU:bG,leakyReLU:wG,prelu:LG,softmax:SG,thresholdedReLU:IG,conv1d:xG,conv2d:TG,conv2dTranspose:AG,conv3d:vG,separableConv2d:NG,cropping2D:CG,upSampling2d:RG,depthwiseConv2d:OG,activation:EG,dense:DG,dropout:kG,spatialDropout1d:FG,flatten:_G,repeatVector:WG,reshape:$G,permute:UG,embedding:BG,add:MG,average:PG,concatenate:zG,maximum:GG,minimum:VG,multiply:HG,dot:YG,batchNormalization:qG,layerNormalization:jG,zeroPadding2d:KG,averagePooling1d:UL,avgPool1d:XG,avgPooling1d:JG,averagePooling2d:BL,avgPool2d:ZG,avgPooling2d:QG,averagePooling3d:ML,avgPool3d:eV,avgPooling3d:tV,globalAveragePooling1d:nV,globalAveragePooling2d:sV,globalMaxPooling1d:MN,globalMaxPooling2d:PN,maxPooling1d:zN,maxPooling2d:GN,maxPooling3d:iV,gru:rV,gruCell:oV,lstm:aV,lstmCell:cV,simpleRNN:lV,simpleRNNCell:hV,convLstm2d:uV,convLstm2dCell:dV,rnn:pV,stackedRNNCells:mV,bidirectional:fV,timeDistributed:gV,globalMaxPool1d:yV,globalMaxPool2d:bV,maxPool1d:wV,maxPool2d:LV,Layer:lt,RNN:Bi,RNNCell:cc,input:hN,gaussianNoise:SV,gaussianDropout:IV,alphaDropout:xV,masking:TV});function vV(e,t){return kw(e,t)}function NV(e,t){return Hv(e,t)}function CV(e,t){return Yv(e,t)}function RV(e,t){return Fw(e,t)}function OV(e,t){return _w(e,t)}function EV(e,t){return Vv(e,t)}function DV(e,t){return b3(e,t)}function kV(e,t){return Hp(e,t)}function FV(e,t){return rc(e,t)}function _V(e,t){return Kr(e,t)}function WV(e,t){return Kr(e,t)}function $V(e,t){return Kr(e,t)}function UV(e,t){return ar(e,t)}function BV(e,t){return ar(e,t)}function MV(e,t){return ar(e,t)}var PV=Object.freeze({__proto__:null,binaryAccuracy:vV,binaryCrossentropy:NV,sparseCategoricalAccuracy:CV,categoricalAccuracy:RV,categoricalCrossentropy:OV,precision:EV,recall:DV,cosineProximity:kV,meanAbsoluteError:FV,meanAbsolutePercentageError:_V,MAPE:WV,mape:$V,meanSquaredError:UV,MSE:BV,mse:MV});var zV=Object.freeze({__proto__:null,modelFromJSON:J3});function GV(e){return new Gh(e)}function VV(e){return rG(e)}function HV(e){return oG(e)}var YV=Object.freeze({__proto__:null,l1l2:GV,l1:VV,l2:HV});class VN extends ic{constructor(){super(...arguments);this.model=null}setModel(e){if(!(e instanceof cr))throw new Error("model must be a LayersModel, not some other Container");this.model=e}}function om(e,t){return et}class YN extends VN{constructor(e){super();if(e==null&&(e={}),e.restoreBestWeights)throw new Ge("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=e.monitor||"val_loss",this.minDelta=Math.abs(e.minDelta||0),this.patience=e.patience||0,this.verbose=e.verbose||0,this.mode=e.mode||"auto",this.baseline=e.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=om:this.mode==="max"?this.monitorFunc=HN:this.monitor.indexOf("acc")!==-1?this.monitorFunc=HN:this.monitorFunc=om,this.monitorFunc===om&&(this.minDelta*=-1)}async onTrainBegin(e){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===om?Infinity:-Infinity}async onEpochEnd(e,t){await jr(t);const n=this.getMonitorValue(t);if(n==null)return;this.monitorFunc(n-this.minDelta,this.best)?(this.best=n,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=e,this.model.stopTraining=!0))}async onTrainEnd(e){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(e){e==null&&(e={});const t=e[this.monitor];return t==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(e)}`),t}}function qV(e){return new YN(e)}const jV={earlyStopping:qV};var di;(function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(di||(di={}));var qN;(function(e){let t;(function(n){n[n.LEGACY=0]="LEGACY",n[n.V1=1]="V1",n[n.V2=2]="V2"})(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))})(qN||(qN={}));const PL={};function KV(e,t){const n={tfOpName:e,category:"custom",inputs:[],attrs:[],customExecutor:t};PL[e]=n}function jN(e){return PL[e]}function XV(e){delete PL[e]}function R(e,t,n,s){const i=t.inputParams[e];if(i&&i.inputIndexStart!==void 0){const a=i.inputIndexStart,c=i.inputIndexEnd===0?void 0:i.inputIndexEnd===void 0?a+1:i.inputIndexEnd;if(i.type==="tensor")return Qn(t.inputNames[i.inputIndexStart],n,s);if(i.type==="tensors"){const m=t.inputNames.slice(a,c);return m.map(y=>Qn(y,n,s))}const u=Qn(t.inputNames.slice(a)[0],n,s),p=u.dataSync();return i.type==="number"?p[0]:Ls(u.shape,p)}const o=t.attrParams[e];return o&&o.value}function Qn(e,t,n){const[s,i]=ls(e),o=n.currentContextIds.find(a=>!!t[am(s,a)]);return o!==void 0?t[am(s,o)][i]:void 0}function JV(e,t,n){return t[am(e,n.currentContextId)]}function lr(e,t){const[n,s]=ls(e);return[am(n,t&&t.currentContextId),s]}function am(e,t){return t?`${e}-${t}`:e}function ls(e){const t=e.split(":");if(t.length===1)return[e,0];const n=t[0];return[n,Number(t[t.length-1])]}function $ee(e,t){const n=[];for(let s=0;sn.json));this.opMappers=t.reduce((n,s)=>(n[s.tfOpName]=s,n),{})}transformGraph(e,t={}){const 
n=e.node,s=[],i=[],o=[],a=n.reduce((I,T)=>(I[T.name]=this.mapNode(T),T.op.startsWith("Placeholder")?s.push(I[T.name]):T.op==="Const"?i.push(I[T.name]):(T.input==null||T.input.length===0)&&o.push(I[T.name]),I),{});let c=[];const u=[];let p={},m={};t!=null&&(p=this.mapSignatureEntries(t.inputs),m=this.mapSignatureEntries(t.outputs));const y=Object.keys(a);y.forEach(I=>{const T=a[I];T.inputNames.forEach(v=>{const[N]=lr(v);T.inputs.push(a[N]),a[N].children.push(T)})}),Object.keys(m).length===0?y.forEach(I=>{const T=a[I];T.children.length===0&&u.push(T)}):Object.keys(m).forEach(I=>{const[T]=lr(I),v=a[T];v!=null&&(v.signatureKey=m[I],u.push(v))}),Object.keys(p).length>0?Object.keys(p).forEach(I=>{const[T]=lr(I),v=a[T];v&&(v.signatureKey=p[I],c.push(v))}):c=s;let b={};e.library!=null&&e.library.function!=null&&(b=e.library.function.reduce((I,T)=>(I[T.signature.name]=this.mapFunction(T),I),{}));const w={nodes:a,inputs:c,outputs:u,weights:i,placeholders:s,signature:t,functions:b};return o.length>0&&(w.initNodes=o),w}mapSignatureEntries(e){return Object.keys(e||{}).reduce((t,n)=>(t[e[n].name]=n,t),{})}mapNode(e){const t=jN(e.op)||this.opMappers[e.op]||{};e.attr==null&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map(s=>s.startsWith("^")?s.substr(1):s),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return t.inputs!=null&&(n.inputParams=t.inputs.reduce((s,i)=>(s[i.name]={type:i.type,inputIndexStart:i.start,inputIndexEnd:i.end},s),{})),t.attrs!=null&&(n.attrParams=t.attrs.reduce((s,i)=>{const o=i.type;let a;switch(i.type){case"string":a=zL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=zL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"string[]":a=XL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=XL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number":a=VL(e.attr,i.tfName,i.defaultValue||0),a===void 0&&!!i.tfDeprecatedName&&(a=VL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number[]":a=KL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=KL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool":a=GL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=GL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool[]":a=ZL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=ZL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape":a=jL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=jL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape[]":a=JL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype":a=YL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=YL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype[]":a=qL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=qL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"func":a=JN(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JN(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${i.type} for op: ${e.op}`)}return s[i.name]={value:a,type:o},s},{})),n}mapFunction(e){const t=e.nodeDef,n=[],s=[];let i={};t!=null&&(i=t.reduce((m,y)=>(m[y.name]=this.mapNode(y),y.op==="Const"&&s.push(m[y.name]),m),{}));const 
o=[],a=[];e.signature.inputArg.forEach(m=>{const[y]=lr(m.name),b={name:y,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:HL(m.type),type:"dtype"}},children:[]};b.signatureKey=m.name,o.push(b),i[y]=b});const c=Object.keys(i);c.forEach(m=>{const y=i[m];y.inputNames.forEach(b=>{const[w]=lr(b);y.inputs.push(i[w]),i[w].children.push(y)})});const u=e.ret;e.signature.outputArg.forEach(m=>{const[y,b]=lr(u[m.name]),w=i[y];w!=null&&(w.defaultOutput=b,a.push(w))});const p=this.mapArgsToSignature(e);return{nodes:i,inputs:o,outputs:a,weights:s,placeholders:n,signature:p}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n),t),{}),outputs:e.signature.outputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t),{})}}mapArgToTensorInfo(e,t){let n=e.name;return t!=null&&(n=t[n]),{name:n,dtype:e.type}}}function OH(e){const t=C().global;if(typeof t.atob!="undefined")return t.atob(e);if(typeof Buffer!="undefined")return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function XN(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):OH(e);return t?n:n.toLowerCase()}function zL(e,t,n,s=!1){const i=e[t];return i!=null?XN(i.s,s):n}function GL(e,t,n){const s=e[t];return s?s.b:n}function VL(e,t,n){const s=e[t]||{},i=s.i!=null?s.i:s.f!=null?s.f:n;return typeof i=="number"?i:parseInt(i,10)}function HL(e){typeof e=="string"&&(e=di[e]);switch(e){case di.DT_FLOAT:return"float32";case di.DT_INT32:case di.DT_INT64:case di.DT_INT8:case di.DT_UINT8:return"int32";case di.DT_BOOL:return"bool";case di.DT_DOUBLE:return"float32";case di.DT_STRING:return"string";default:return null}}function JN(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function YL(e,t,n){const s=e[t];return s&&s.type?HL(s.type):n}function qL(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map(i=>HL(i)):n}function ZN(e){return e.unknownRank?void 0:e.dim!=null?e.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function jL(e,t,n){const s=e[t];return s&&s.shape?ZN(s.shape):n}function KL(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map(i=>typeof i=="number"?i:parseInt(i,10)):n}function XL(e,t,n,s=!1){const i=e[t];return i&&i.list&&i.list.s?i.list.s.map(o=>XN(o,s)):n}function JL(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map(i=>ZN(i)):n}function ZL(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class EH{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map(s=>this.getInput(s)),e.rawAttrs!=null&&(this.attrs=Object.keys(e.rawAttrs).reduce((s,i)=>(s[i]=this.getAttr(i),s),{}))}getInput(e){return Qn(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(n.tensor!=null)return Qn(e,this.tensorMap,this.context);if(n.i!=null||n.f!=null)return VL(this.node.rawAttrs,e,t);if(n.s!=null)return zL(this.node.rawAttrs,e,t);if(n.b!=null)return GL(this.node.rawAttrs,e,t);if(n.shape!=null)return jL(this.node.rawAttrs,e,t);if(n.type!=null)return YL(this.node.rawAttrs,e,t);if(n.list!=null){if(n.list.i!=null||n.list.f!=null)return KL(this.node.rawAttrs,e,t);if(n.list.s!=null)return XL(this.node.rawAttrs,e,t);if(n.list.shape!=null)return JL(this.node.rawAttrs,e,t);if(n.list.b!=null)return ZL(this.node.rawAttrs,e,t);if(n.list.type!=null)return 
qL(this.node.rawAttrs,e,t)}return t}}const DH=(e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[be(R("a",e,t,n),R("b",e,t,n))];case"AddN":return[eA(R("tensors",e,t,n))];case"FloorMod":case"Mod":return[tp(R("a",e,t,n),R("b",e,t,n))];case"Mul":return[X(R("a",e,t,n),R("b",e,t,n))];case"RealDiv":case"Div":return[_e(R("a",e,t,n),R("b",e,t,n))];case"DivNoNan":return[Lb(R("a",e,t,n),R("b",e,t,n))];case"FloorDiv":return[Md(R("a",e,t,n),R("b",e,t,n))];case"Sub":return[Ce(R("a",e,t,n),R("b",e,t,n))];case"Minimum":return[ko(R("a",e,t,n),R("b",e,t,n))];case"Maximum":return[Us(R("a",e,t,n),R("b",e,t,n))];case"Pow":return[ii(R("a",e,t,n),R("b",e,t,n))];case"SquaredDifference":return[Sh(R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Uee="arithmetic";const kH=(e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[rn(R("x",e,t,n))];case"Acos":return[nb(R("x",e,t,n))];case"Acosh":return[sb(R("x",e,t,n))];case"Asin":return[ob(R("x",e,t,n))];case"Asinh":return[ab(R("x",e,t,n))];case"Atan":return[cb(R("x",e,t,n))];case"Atan2":return[lb(R("x",e,t,n),R("y",e,t,n))];case"Atanh":return[hb(R("x",e,t,n))];case"Ceil":return[fb(R("x",e,t,n))];case"Complex":return[Ci(R("real",e,t,n),R("imag",e,t,n))];case"Cos":return[lh(R("x",e,t,n))];case"Cosh":return[qd(R("x",e,t,n))];case"Elu":return[Do(R("x",e,t,n))];case"Erf":return[Sb(R("x",e,t,n))];case"Exp":return[xs(R("x",e,t,n))];case"Expm1":return[Ib(R("x",e,t,n))];case"Floor":return[Pa(R("x",e,t,n))];case"Log":return[is(R("x",e,t,n))];case"Log1p":return[Jd(R("x",e,t,n))];case"Imag":return[Ga(R("x",e,t,n))];case"Neg":return[Pt(R("x",e,t,n))];case"Reciprocal":return[Eb(R("x",e,t,n))];case"Real":return[Fo(R("x",e,t,n))];case"Relu":return[Fi(R("x",e,t,n))];case"Round":return[kb(R("x",e,t,n))];case"Selu":return[rp(R("x",e,t,n))];case"Sigmoid":return[Ei(R("x",e,t,n))];case"Sin":return[op(R("x",e,t,n))];case"Sign":return[_b(R("x",e,t,n))];case"Sinh":return[ap(R("x",e,t,n))];case"Softplus":return[Va(R("x",e,t,n))];case"Sqrt":return[Sn(R("x",e,t,n))];case"Square":return[Lt(R("x",e,t,n))];case"Tanh":return[Ma(R("x",e,t,n))];case"Tan":return[Ub(R("x",e,t,n))];case"Relu6":case"ClipByValue":return[jn(R("x",e,t,n),R("clipValueMin",e,t,n),R("clipValueMax",e,t,n))];case"Rsqrt":return[ip(Qn(e.inputNames[0],t,n))];case"Prod":return[sp(R("x",e,t,n),R("axes",e,t,n))];case"LeakyRelu":return[Xd(R("x",e,t,n),R("alpha",e,t,n))];case"Prelu":return[gh(R("x",e,t,n),R("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Bee="basic_math";function Gs(e,t,n=""){k(FH(e,t),()=>n+` Shapes ${e} and ${t} must match`)}function FH(e,t){if(e.length!==t.length)return!1;for(let n=0;n{(e==null||!e.has(t.tensor.id))&&t.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map(t=>this.read(t))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is 
not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because the value dtype is ${t.dtype}, but TensorArray dtype is ${this.dtype}.`);if(this.size()===0&&(this.elementShape==null||this.elementShape.length===0)&&(this.elementShape=t.shape),Gs(this.elementShape,t.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${e}.`),n.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been read.`);if(n.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been written.`);n.tensor=t,Rn(t),n.written=!0,this.tensors[e]=n}writeMany(e,t){if(e.length!==t.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${e.length} is not the same as tensors size: ${t.length}.`);e.forEach((n,s)=>this.write(n,t[s]))}gather(e,t){if(!!t&&t!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${t}`);if(e)e=e.slice(0,this.size());else{e=[];for(let s=0;s=this.maxSize)throw new Error(`Max index must be < array size (${n} vs. ${this.maxSize})`);this.writeMany(e,_i(t,0))}split(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);let n=0;const s=e.map(c=>(n+=c,n));if(n!==t.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is ${n}, and tensor's shape is: ${t.shape}`);if(!this.dynamicSize&&e.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${e.length}), and the TensorArray is not marked as dynamically resizeable`);const i=n===0?0:t.size/n,o=[];ee(()=>{t=K(t,[1,n,i]);for(let c=0;c{if(n!==i.dtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${i.dtype}`);Gs(t,i.shape,"TensorList shape mismatch: "),Rn(i)}),this.idTensor=Ne(0),this.maxNumElements=s,Rn(this.idTensor)}get id(){return this.idTensor.id}copy(){return new tu([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(e){this.tensors.forEach(t=>{(e==null||!e.has(t.id))&&t.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(e,t,n=-1){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(n!==-1&&this.tensors.length!==n)throw new Error(`Operation expected a list with ${n} elements but got a list with ${this.tensors.length} elements.`);return Gs(e,this.elementShape,"TensorList shape mismatch: "),ee(()=>{const s=this.tensors.map(i=>K(i,e));return as(s,0)})}popBack(e,t){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(this.size()===0)throw new Error("Trying to pop from an empty list.");const n=this.tensors.pop();return Gs(n.shape,e,"TensorList shape mismatch: "),K(n,e)}pushBack(e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(Gs(e.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");Rn(e),this.tensors.push(e)}resize(e){if(e<0)throw new Error(`TensorListResize expects size to be non-negative. 
Got: ${e}`);if(this.maxNumElements!==-1&&e>this.maxNumElements)throw new Error(`TensorListResize input size ${e} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=e}getItem(e,t,n){if(n!==this.elementDtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${this.elementDtype}`);if(e<0||e>this.tensors.length)throw new Error(`Trying to access element ${e} in a list with ${this.tensors.length} elements.`);if(this.tensors[e]==null)throw new Error(`element at index ${e} is null.`);return Gs(this.tensors[e].shape,t,"TensorList shape mismatch: "),this.tensors[e]}setItem(e,t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(e<0||this.maxNumElements!==-1&&e>=this.maxNumElements)throw new Error(`Trying to set element ${e} in a list with max ${this.maxNumElements} elements.`);Gs(this.elementShape,t.shape,"TensorList shape mismatch: "),Rn(t),this.tensors[e]=t}gather(e,t,n){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);return Gs(this.elementShape,n,"TensorList shape mismatch: "),e=e.slice(0,this.size()),e.length===0?en([],[0].concat(this.elementShape)):ee(()=>{const s=e.map(i=>K(this.tensors[i],n));return as(s,0)})}concat(e,t){if(!!e&&e!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${e}`);return Gs(this.elementShape,t,"TensorList shape mismatch: "),this.size()===0?en([],[0].concat(this.elementShape)):ee(()=>{const n=this.tensors.map(s=>K(s,t));return Mt(n,0)})}}function WH(e,t,n){const s=e.dtype;if(e.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${e.shape}`);if(e.dtype!==n)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${n}`);const i=e.shape.slice(1);Gs(i,t,"TensorList shape mismatch: ");const o=_i(e);return new tu(o,t,s)}function $H(e,t,n){return new tu([],e,t,n)}function UH(e,t,n,s){if(t.length!==e.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${e.shape[0]}`);const i=Math.max(...t);if(s!=null&&s!==-1&&i>=s)throw new Error(`Max index must be < array size (${i} vs. 
${s})`);const o=new tu([],n,e.dtype,s),a=_i(e,0);return t.forEach((c,u)=>{o.setItem(c,a[u])}),o}function BH(e,t,n){let s=0;const i=t.map(u=>(s+=u,s));if(s!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is - ${s}, and tensor's shape is: ${e.shape}`);const o=s===0?0:e.size/s,a=ee(()=>{const u=[];e=K(e,[1,s,o]);for(let p=0;p{switch(e.op){case"If":case"StatelessIf":{const s=R("thenBranch",e,t,n),i=R("elseBranch",e,t,n),o=R("cond",e,t,n),a=R("args",e,t,n),c=await o.data();return c[0]?n.functionMap[s].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap):n.functionMap[i].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=R("body",e,t,n),i=R("cond",e,t,n),o=R("args",e,t,n),a=await n.functionMap[i].executeFunctionAsync(o,n.tensorArrayMap,n.tensorListMap),c=o.map(m=>m.id);let u=await a[0].data();a.forEach(m=>{!m.kept&&c.indexOf(m.id)===-1&&m.dispose()});let p=o;for(;u[0];){const m=p;p=await n.functionMap[s].executeFunctionAsync(p,n.tensorArrayMap,n.tensorListMap);const y=p.map(w=>w.id);m.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()});const b=await n.functionMap[i].executeFunctionAsync(p,n.tensorArrayMap,n.tensorListMap);u=await b[0].data(),b.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()})}return p}case"LoopCond":{const s=R("pred",e,t,n);return[hr(s)]}case"Switch":{const s=R("pred",e,t,n);let i=R("data",e,t,n);return i.kept||(i=hr(i)),(await s.data())[0]?[void 0,i]:[i,void 0]}case"Merge":{const s=e.inputNames.find(i=>Qn(i,t,n)!==void 0);if(s){const i=Qn(s,t,n);return[hr(i)]}return}case"Enter":{const s=R("frameName",e,t,n),i=R("tensor",e,t,n);return n.enterFrame(s),[hr(i)]}case"Exit":{const s=R("tensor",e,t,n);return n.exitFrame(),[hr(s)]}case"NextIteration":{const s=R("tensor",e,t,n);return n.nextIteration(),[hr(s)]}case"TensorArrayV3":{const s=R("size",e,t,n),i=R("dtype",e,t,n),o=R("elementShape",e,t,n),a=R("dynamicSize",e,t,n),c=R("clearAfterRead",e,t,n),u=R("identicalElementShapes",e,t,n),p=R("name",e,t,n),m=new _H(p,i,s,o,u,a,c);return n.addTensorArray(m),[m.idTensor,Ne(1)]}case"TensorArrayWriteV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.write(i,o),[a.idTensor]}case"TensorArrayReadV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=n.getTensorArray(s.id);return[o.read(i)]}case"TensorArrayGatherV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("dtype",e,t,n),a=n.getTensorArray(s.id);return[a.gather(i,o)]}case"TensorArrayScatterV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.scatter(i,o),[a.idTensor]}case"TensorArrayConcatV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id),o=R("dtype",e,t,n);return[i.concat(o)]}case"TensorArraySplitV3":{const s=R("tensorArrayId",e,t,n),i=R("tensor",e,t,n),o=R("lengths",e,t,n),a=n.getTensorArray(s.id);return a.split(o,i),[a.idTensor]}case"TensorArraySizeV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return[Ne(i.size(),"int32")]}case"TensorArrayCloseV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return i.clearAndClose(),[i.idTensor]}case"TensorListSetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorList(s.id);return a.setItem(i,o),[a.idTensor]}case"TensorListGetItem":{const 
s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.getItem(i,o,a)]}case"TensorListScatterV2":case"TensorListScatter":{const s=R("indices",e,t,n),i=R("tensor",e,t,n),o=R("elementShape",e,t,n),a=R("numElements",e,t,n),c=UH(i,s,o,a);return n.addTensorList(c),[c.idTensor]}case"TensorListReserve":{const s=R("elementShape",e,t,n),i=R("elementDType",e,t,n),o=R("numElements",e,t,n),a=$H(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListGather":{const s=R("tensorListId",e,t,n),i=R("indices",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.gather(i,a,o)]}case"TensorListStack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=R("numElements",e,t,n),c=n.getTensorList(s.id);return[c.stack(i,o,a)]}case"TensorListFromTensor":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=WH(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListConcat":{const s=R("tensorListId",e,t,n),i=n.getTensorList(s.id),o=R("dtype",e,t,n),a=R("elementShape",e,t,n);return[i.concat(o,a)]}case"TensorListPushBack":{const s=R("tensorListId",e,t,n),i=R("tensor",e,t,n),o=n.getTensorList(s.id);return o.pushBack(i),[o.idTensor]}case"TensorListPopBack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=n.getTensorList(s.id);return[a.popBack(i,o)]}case"TensorListSplit":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("lengths",e,t,n),a=BH(s,o,i);return n.addTensorList(a),[a.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Mee="control";function QN(e,t,n){const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=s==="fusedbatchnorm",u=R("numArgs",e,t,n);if(o){if(a&&u!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&u!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(c)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const p=R("strides",e,t,n),m=cm(e,t,n),y=R("dataFormat",e,t,n).toUpperCase(),b=R("dilations",e,t,n),[w,I]=R("args",e,t,n);return{stride:p,pad:m,dataFormat:y,dilations:b,biasArg:w,preluArg:I,activationFunc:i}}const PH=(e,t,n)=>{switch(e.op){case"Conv1D":{const s=R("stride",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilation",e,t,n);return[Hd(R("x",e,t,n),R("filter",e,t,n),s,i,o,a)]}case"Conv2D":{const s=R("strides",e,t,n),i=cm(e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[er(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,o,[a[1],a[2]])]}case"_FusedConv2D":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:u,activationFunc:p}=QN(e,t,n);return[Hb({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:p,preluActivationWeights:u})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:u,activationFunc:p}=QN(e,t,n);return[YA({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:p,preluActivationWeights:u})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=R("outputShape",e,t,n),i=R("strides",e,t,n),o=cm(e,t,n);return[Yd(R("x",e,t,n),R("filter",e,t,n),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const 
s=R("strides",e,t,n),i=cm(e,t,n),o=R("dilations",e,t,n),a=R("dataFormat",e,t,n).toUpperCase();return[Co(R("input",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,a,[o[1],o[2]])]}case"Conv3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[yb(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2],s[3]],i,o,[a[1],a[2],a[3]])]}case"AvgPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[oh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[mh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n),a=R("includeBatchInIndex",e,t,n),{result:c,indexes:u}=bA(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i,a);return[c,u]}case"AvgPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[pb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[Nb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dilations",e,t,n),a=s[1],c=s[2],u=o[1],p=o[2];return[wb(R("x",e,t,n),R("filter",e,t,n),[a,c],i,[u,p],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Pee="convolution";const zH=(e,t,n)=>{switch(e.op){case"Fill":{const s=R("shape",e,t,n),i=R("dtype",e,t,n),o=R("value",e,t,n);return[hh(s,o,i)]}case"LinSpace":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("num",e,t,n);return[fA(s,i,o)]}case"Multinomial":{const s=R("logits",e,t,n),i=R("numSamples",e,t,n),o=R("seed",e,t,n);return[wA(s,i,o)]}case"OneHot":{const s=R("indices",e,t,n),i=R("depth",e,t,n),o=R("onValue",e,t,n),a=R("offValue",e,t,n);return[To(s,i,o,a)]}case"Ones":return[si(R("shape",e,t,n),R("dtype",e,t,n))];case"OnesLike":return[Dn(R("x",e,t,n))];case"RandomUniform":return[_o(R("shape",e,t,n),R("minval",e,t,n),R("maxval",e,t,n),R("dtype",e,t,n))];case"Range":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("step",e,t,n);return[yh(s,i,o,R("dtype",e,t,n))]}case"TruncatedNormal":{const s=R("shape",e,t,n),i=R("mean",e,t,n),o=R("stdDev",e,t,n),a=R("seed",e,t,n);return[Ih(s,i,o,R("dtype",e,t,n),a)]}case"Zeros":return[ct(R("shape",e,t,n),R("dtype",e,t,n))];case"ZerosLike":return[et(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},zee="creation";function QL(e,t,n){const s=R("boxes",e,t,n),i=R("scores",e,t,n),o=R("maxOutputSize",e,t,n),a=R("iouThreshold",e,t,n),c=R("scoreThreshold",e,t,n),u=R("softNmsSigma",e,t,n);return{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}}const GH=async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=QL(e,t,n),p=await Vr.nonMaxSuppressionWithScoreAsync(s,i,o,a,c,u);return[p.selectedIndices,p.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=QL(e,t,n),u=R("padToMaxOutputSize",e,t,n),p=await Vr.nonMaxSuppressionPaddedAsync(s,i,o,a,c,u);return[p.selectedIndices,p.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=QL(e,t,n);return[await Vr.nonMaxSuppressionAsync(s,i,o,a,c)]}case"Where":{const s=ve(R("condition",e,t,n),"bool"),i=[await Pb(s)];return s.dispose(),i}case"ListDiff":return 
SA(R("x",e,t,n),R("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}},Gee="dynamic";const VH=(e,t,n)=>{switch(e.op){case"TopKV2":{const s=R("x",e,t,n),i=R("k",e,t,n),o=R("sorted",e,t,n),a=Bb(s,i,o);return[a.values,a.indices]}case"Unique":{const s=R("x",e,t,n),i=up(s);return[i.values,i.indices]}case"UniqueV2":{const s=R("x",e,t,n),i=R("axis",e,t,n),o=up(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Vee="evaluation";const HH=(e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=R("default",e,t,n);return[Qn(e.name,t,n)||s];case"Placeholder":return[Qn(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{const p=R("x",e,t,n);return[hr(p)]}case"IdentityN":return R("x",e,t,n).map(p=>hr(p));case"Snapshot":const i=R("x",e,t,n);return[hr(i)];case"Shape":return[rs(R("x",e,t,n).shape,"int32")];case"ShapeN":return R("x",e,t,n).map(p=>rs(p.shape));case"Size":return[Ne(R("x",e,t,n).size,"int32")];case"Rank":return[Ne(R("x",e,t,n).rank,"int32")];case"NoOp":return[Ne(1)];case"Print":const o=R("x",e,t,n),a=R("data",e,t,n),c=R("message",e,t,n),u=R("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(c);for(let p=0;p{switch(e.op){case"ResizeBilinear":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Vr.resizeBilinear(s,[i[0],i[1]],o)]}case"ResizeNearestNeighbor":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Vr.resizeNearestNeighbor(s,[i[0],i[1]],o)]}case"CropAndResize":{const s=R("image",e,t,n),i=R("boxes",e,t,n),o=R("boxInd",e,t,n),a=R("cropSize",e,t,n),c=R("method",e,t,n),u=R("extrapolationValue",e,t,n);return[Vr.cropAndResize(s,i,o,a,c,u)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Yee="image";const qH=(e,t,n)=>{switch(e.op){case"Equal":return[ni(R("a",e,t,n),R("b",e,t,n))];case"NotEqual":return[Pr(R("a",e,t,n),R("b",e,t,n))];case"Greater":return[Ts(R("a",e,t,n),R("b",e,t,n))];case"GreaterEqual":return[tr(R("a",e,t,n),R("b",e,t,n))];case"Less":return[dh(R("a",e,t,n),R("b",e,t,n))];case"LessEqual":return[Mr(R("a",e,t,n),R("b",e,t,n))];case"LogicalAnd":return[Bs(R("a",e,t,n),R("b",e,t,n))];case"LogicalNot":return[ph(R("a",e,t,n))];case"LogicalOr":return[ep(R("a",e,t,n),R("b",e,t,n))];case"Select":case"SelectV2":return[$n(R("condition",e,t,n),R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},qee="logical";const jH=(e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[at(R("a",e,t,n),R("b",e,t,n),R("transposeA",e,t,n),R("transposeB",e,t,n))];case"Transpose":return[Pe(R("x",e,t,n),R("perm",e,t,n))];case"_FusedMatMul":const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=R("numArgs",e,t,n);if(o){if(a&&c!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&c!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[u,p]=R("args",e,t,n);return[bp({a:R("a",e,t,n),b:R("b",e,t,n),transposeA:R("transposeA",e,t,n),transposeB:R("transposeB",e,t,n),bias:u,activation:i,preluActivationWeights:p})];default:throw TypeError(`Node type ${e.op} is not implemented`)}},jee="matrices";const 
KH=(e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"FusedBatchNormV3":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"LRN":return[Tb(R("x",e,t,n),R("radius",e,t,n),R("bias",e,t,n),R("alpha",e,t,n),R("beta",e,t,n))];case"Softmax":return[Wo(R("x",e,t,n))];case"LogSoftmax":return[Qd(R("x",e,t,n))];case"SparseToDense":return[zb(R("sparseIndices",e,t,n),R("outputShape",e,t,n),R("sparseValues",e,t,n),R("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Kee="normalization";const XH=(e,t,n)=>{switch(e.op){case"Max":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Xn(R("x",e,t,n),s,i)]}case"Mean":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[zt(R("x",e,t,n),s,i)]}case"Min":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Ga(R("x",e,t,n),s,i)]}case"Sum":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Ue(R("x",e,t,n),s,i)]}case"All":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Pd(R("x",e,t,n),s,i)]}case"Any":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[th(R("x",e,t,n),s,i)]}case"ArgMax":{const s=R("axis",e,t,n);return[nh(R("x",e,t,n),s)]}case"ArgMin":{const s=R("axis",e,t,n);return[rb(R("x",e,t,n),s)]}case"Prod":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[sp(R("x",e,t,n),s,i)]}case"Cumsum":{const s=R("axis",e,t,n),i=R("exclusive",e,t,n),o=R("reverse",e,t,n);return[jd(R("x",e,t,n),s,i,o)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Xee="reduction";const JH=(e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=R("n",e,t,n),i=R("axis",e,t,n);let o=R("tensors",e,t,n);return o=o.slice(0,s),[Mt(o,i)]}case"GatherV2":case"Gather":{const s=R("axis",e,t,n),i=R("x",e,t,n),o=R("indices",e,t,n);return[Ma(i,ve(o,"int32"),s)]}case"ReverseV2":case"Reverse":{const s=R("axis",e,t,n),i=R("x",e,t,n);return[As(i,s)]}case"Slice":{const s=R("begin",e,t,n),i=R("size",e,t,n);return[st(R("x",e,t,n),s,i)]}case"StridedSlice":{const s=R("begin",e,t,n),i=R("end",e,t,n),o=R("strides",e,t,n),a=R("beginMask",e,t,n),c=R("endMask",e,t,n),u=R("ellipsisMask",e,t,n),p=R("newAxisMask",e,t,n),m=R("shrinkAxisMask",e,t,n),y=R("x",e,t,n);return[$b(y,s,i,o,a,c,u,p,m)]}case"Pack":return ee(()=>{const s=R("axis",e,t,n),i=R("tensors",e,t,n),o=i[0].shape,a=zr(i[0]).shape,c=i.map(u=>{const p=ot(u.shape,o);if(!p&&!ot(zr(u).shape,a))throw new Error("the input tensors shape does not match");return p?u:K(u,o)});return[as(c,s)]});case"Unpack":{const s=R("axis",e,t,n),i=R("tensor",e,t,n);return _i(i,s)}case"Tile":{const s=R("reps",e,t,n);return[Br(R("x",e,t,n),s)]}case"Split":case"SplitV":{const s=R("axis",e,t,n),i=R("numOrSizeSplits",e,t,n),o=R("x",e,t,n);return os(o,i,s)}case"ScatterNd":{const s=R("indices",e,t,n),i=R("values",e,t,n),o=R("shape",e,t,n);return[MA(s,i,o)]}case"GatherNd":{const s=R("x",e,t,n),i=R("indices",e,t,n);return[PA(s,i)]}case"SparseToDense":{const s=R("sparseIndices",e,t,n),i=R("outputShape",e,t,n),o=R("sparseValues",e,t,n),a=R("defaultValue",e,t,n);return[zb(s,o,i,o.dtype===a.dtype?a:ve(a,o.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Jee="slice_join";const ZH=(e,t,n)=>{switch(e.op){case"FFT":return[wh(R("x",e,t,n))];case"IFFT":return[Ya(R("x",e,t,n))];case"RFFT":return[Lh(R("x",e,t,n))];case"IRFFT":return[hp(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not 
implemented`)}},Zee="spectral";const QH=(e,t,n)=>{switch(e.op){case"Cast":return[ve(R("x",e,t,n),R("dtype",e,t,n))];case"ExpandDims":{const s=R("axis",e,t,n);return[Kn(R("x",e,t,n),s)]}case"Squeeze":{const s=R("axis",e,t,n);return[zr(R("x",e,t,n),s)]}case"Reshape":return[K(R("x",e,t,n),R("shape",e,t,n))];case"PadV2":case"Pad":return[ki(R("x",e,t,n),R("padding",e,t,n),R("constantValue",e,t,n))];case"SpaceToBatchND":{const s=R("blockShape",e,t,n),i=R("paddings",e,t,n);return[fh(R("x",e,t,n),s,i)]}case"BatchToSpaceND":{const s=R("blockShape",e,t,n),i=R("crops",e,t,n);return[ah(R("x",e,t,n),s,i)]}case"DepthToSpace":{const s=R("blockSize",e,t,n),i=R("dataFormat",e,t,n).toUpperCase();return[bb(R("x",e,t,n),s,i)]}case"BroadcastTo":return[ch(R("x",e,t,n),R("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Qee="transformation";function e0(e,t,n){const s=((i,o,a)=>{switch(i.category){case"arithmetic":return ee(()=>DH(i,o,a));case"basic_math":return ee(()=>kH(i,o,a));case"control":return MH(i,o,a);case"convolution":return ee(()=>PH(i,o,a));case"creation":return ee(()=>zH(i,o,a));case"dynamic":return GH(i,o,a);case"evaluation":return ee(()=>VH(i,o,a));case"image":return ee(()=>YH(i,o,a));case"graph":return ee(()=>HH(i,o,a));case"logical":return ee(()=>qH(i,o,a));case"matrices":return ee(()=>jH(i,o,a));case"normalization":return ee(()=>KH(i,o,a));case"reduction":return ee(()=>XH(i,o,a));case"slice_join":return ee(()=>JH(i,o,a));case"spectral":return ee(()=>ZH(i,o,a));case"transformation":return ee(()=>QH(i,o,a));case"custom":const c=jN(i.op);if(c&&c.customExecutor)return c.customExecutor(new EH(i,o,a));throw TypeError(`Custom op ${i.op} is not registered.`);default:throw TypeError(`Unknown op '${i.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return s instanceof Promise?s.then(i=>[].concat(i)):[].concat(s)}class t0{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;tt.id===0&&t.iterationId===0?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return 
this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function n0(e,t,n,s){const i=new Set,o=[];let a=null,c=null;const u=new Set,p=Object.keys(e).map(b=>ls(b)[0]);let m=[];s!=null&&(m=s.map(b=>ls(b.name)[0]));const y=[...t];for(;y.length>0;){const b=y.pop();if((s0(b)||sY(b))&&(a==null&&(a=b,c=a.children.map(w=>w.name).filter(w=>i.has(w)))),i.add(b.name),n[b.name]!=null)continue;if(p.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){o.push(b.name);continue}b.inputs.forEach(w=>{if(u.has(w.name))return;u.add(w.name),y.push(w)})}return{inputs:e,outputs:t,usedNodes:i,missingInputs:o,dynamicNode:a,syncInputs:c}}function eY(e,t,n){const{usedNodes:s,inputs:i}=n,o=[],a=Object.keys(i).map(m=>ls(m)[0]).map(m=>e.nodes[m]),c=e.initNodes;a.forEach(m=>{s.has(m.name)&&o.push(m)}),e.weights.forEach(m=>{s.has(m.name)&&o.push(m)}),c!=null&&c.forEach(m=>{s.has(m.name)&&o.push(m)});const u=new Set,p=[];for(;o.length>0;){const m=o.pop();u.add(m.name),t[m.name]||p.push(m),m.children.forEach(y=>{!u.has(y.name)&&s.has(y.name)&&y.inputs.every(b=>u.has(b.name))&&o.push(y)})}return p}const tY=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],nY=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"];function s0(e){return tY.indexOf(e.op)>=0}function sY(e){return nY.indexOf(e.op)>=0}class eS{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,e.functions!=null&&Object.keys(e.functions).forEach(n=>{this._functionExecutorMap[n]=new eS(e.functions[n],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map(n=>e[n].map(s=>s.id));this._weightIds=[].concat(...t),this._weightMap=e}get inputs(){return this._inputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(e=>e.signatureKey||e.name)}get outputNodes(){return this._outputs.map(e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t})}get functions(){return Object.keys(this._functions).reduce((e,t)=>(e[t]=this._functions[t].signature,e),{})}getCompilationKey(e,t){const n=e.map(i=>i.name).sort(),s=t.map(i=>i.name).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=n0(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:i,syncInputs:o}=n;if(i!=null)throw new Error(`This execution contains the node '${i.name}', which has the dynamic op '${i.op}'. Please use model.executeAsync() instead. 
Alternatively, to avoid the dynamic ops, specify the inputs [${o}]`);if(s.length>0){const a=t.map(u=>u.name),c=Object.keys(e);throw new Error(`Cannot compute the outputs [${a}] from the provided inputs [${c}]. Missing the following inputs: [${s}]`)}return eY(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map(m=>this.graph.nodes[ls(m)[0]]),i=t.map(m=>ls(m)[0]);let o=i.map(m=>this.graph.nodes[m]);o.length===0&&(o=this._outputs);const a=this.getCompilationKey(s,o);let c=this.compiledMap.get(a);c==null&&(c=this.compile(e,o),this.compiledMap.set(a,c));const u={},p={};return ee(()=>{const m=new t0(this.weightMap,u,p,this.functionExecutorMap),y=Object.assign({},this.weightMap);Object.keys(e).forEach(I=>{const[T,v]=ls(I),N=[];N[v]=e[I],y[T]=N});const b=this.getFrozenTensorIds(y),w={};for(let I=0;IQn(I,y,m))})}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map(n=>e[n]).map(n=>n.map(s=>s.id)));return new Set(t)}checkTensorForDisposal(e,t,n,s,i,o,a){if(t.category==="control"||o.indexOf(e)!==-1)return;n[e].forEach(c=>{c!=null&&(a[c.id]=(a[c.id]||0)+t.children.length)}),t.inputs.forEach(c=>{if(c.category!=="control"){const u=JV(c.name,n,s);u!=null&&u.forEach(p=>{if(p&&!i.has(p.id)){const m=a[p.id];m===1?(p.dispose(),delete a[p.id]):m!=null&&a[p.id]--}})}})}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},i={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const o=new t0(this.weightMap,s,i,this.functionExecutorMap),a=await this.executeWithControlFlow(e,o,t,n),c=t.map(y=>Qn(y,a,o)),u=c.map(y=>y.id),p=Object.keys(e).map(y=>e[y].id),m=new Set([...u,...p,...this.weightIds]);return Object.keys(a).forEach(y=>{const b=a[y];b.forEach(w=>{w&&!w.isDisposed&&!m.has(w.id)&&w.dispose()})}),this.parent==null&&o.dispose(m),c}async executeFunctionAsync(e,t,n){const s=e.reduce((i,o,a)=>(i[this.inputs[a].name]=o,i),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const i=Object.keys(e),o=i.map(E=>this.graph.nodes[ls(E)[0]]),a=n.map(E=>ls(E)[0]),c=a.map(E=>this.graph.nodes[E]),{usedNodes:u,missingInputs:p,dynamicNode:m,syncInputs:y}=n0(e,c,this.weightMap),b=[...o,...this.graph.weights].map(E=>({node:E,contexts:t.currentContext})),w=Object.assign({},this.weightMap);Object.keys(e).forEach(E=>{const[D,F]=ls(E),_=[];_[F]=e[E],w[D]=_});const I={},T=this.getFrozenTensorIds(w),v={};for(;b.length>0;){const E=this.processStack(o,b,t,w,v,T,a,I,u);await Promise.all(E)}m==null&&!s&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");const N=c.filter(E=>!s0(E)&&!Qn(E.name,w,t)).map(E=>E.name);if(N.length>0){let E="";throw m!=null&&(E=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${y}]`),new Error(`Cannot compute the outputs [${N}] from the provided inputs [${i}]. Consider providing the following inputs: [${p}]. 
${E}`)}return w}processStack(e,t,n,s,i,o,a,c,u){const p=[];for(;t.length>0;){const m=t.pop();n.currentContext=m.contexts;let y="";if(m.node.op==="Enter"&&R("isConstant",m.node,s,n)&&([y]=lr(m.node.name,n)),e.indexOf(m.node)===-1){const b=e0(m.node,s,n);y||([y]=lr(m.node.name,n));const w=n.currentContext;b instanceof Promise?p.push(b.then(I=>(s[y]=I,n.currentContext=w,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,u),I))):(s[y]=b,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,u))}else this.processChildNodes(m.node,t,n,s,i,u)}return p}processChildNodes(e,t,n,s,i,o){e.children.forEach(a=>{const[c]=lr(a.name,n);if(i[c]||!o.has(a.name))return;a.op==="Merge"?a.inputNames.some(u=>!!Qn(u,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a})):a.inputNames.every(u=>!!Qn(u,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a}))})}dispose(){Object.keys(this.weightMap).forEach(e=>this.weightMap[e].forEach(t=>t.dispose()))}checkInputShapeAndType(e){Object.keys(e).forEach(t=>{const n=e[t],[s]=ls(t),i=this.graph.nodes[s];if(i.attrParams.shape&&i.attrParams.shape.value){const o=i.attrParams.shape.value,a=o.length===n.shape.length&&n.shape.every((c,u)=>o[u]===-1||o[u]===c);k(a,()=>`The shape of dict['${i.name}'] provided in model.execute(dict) must be [${o}], but was [${n.shape}]`)}i.attrParams.dtype&&i.attrParams.dtype.value&&k(n.dtype===i.attrParams.dtype.value,()=>`The dtype of dict['${i.name}'] provided in model.execute(dict) must be ${i.attrParams.dtype.value}, but was ${n.dtype}`)})}mapInputs(e){const t={};for(const n in e)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[n]!=null){const s=this._signature.inputs[n];t[s.name]=e[n]}else t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter(n=>{const[s]=ls(n);return this.graph.nodes[s]==null});if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map(t=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[t]!=null){const n=this._signature.outputs[t];return n.name}return t},{})}checkOutputs(e){e.forEach(t=>{const[n]=ls(t);if(!this.graph.nodes[n])throw new Error(`The output '${t}' is not found in the graph`)})}}const iY="?tfjs-format=file",rY="model.json";class i0{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",t==null&&(this.loadOptions={})}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){const e=this.modelUrl;if(e.load!=null)this.handler=e;else if(this.loadOptions.requestInit!=null)this.handler=kd(e,this.loadOptions);else{const t=By(e,this.loadOptions);if(t.length===0)t.push(kd(e,this.loadOptions));else if(t.length>1)throw new Error(`Found more than one (${t.length}) load handlers for URL '${[e]}'`);this.handler=t[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let 
n={};this.artifacts.userDefinedMetadata!=null&&(n=this.artifacts.userDefinedMetadata.signature),this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=Rd(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new eS(KN.Instance.transformGraph(t,n)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),e.modelInitializer!=null){const i=KN.Instance.transformGraph(e.modelInitializer);this.initializer=new eS(i),this.initializer.weightMap=this.executor.weightMap,this.initializer.execute({},[])}return!0}async save(e,t){if(typeof e=="string"){const n=Uy(e);if(n.length===0)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(n.length>1)throw new Error(`Found more than one (${n.length}) save handlers for URL '${e}'`);e=n[0]}if(e.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof Q)&&!Array.isArray(e))return e;if(e=Array.isArray(e)?e:[e],e.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce((t,n,s)=>(t[n]=e[s],t),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce((t,n)=>(t[n]=[e[n]],t),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose()}}async function oY(e,t={}){if(e==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(e.load==null&&(e.endsWith("/")||(e=e+"/"),e=`${e}${rY}${iY}`));const n=new i0(e,t);return await n.load(),n}const r0="2.6.0";function aY(e,t){return lm(e,t)}function lm(e,t,n=new Map,s=new Set){if(e==null)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(i.recurse)if(ac(e)){const o=Array.isArray(e)?[]:{};s.add(e);for(const a in e){const c=e[a],u=lm(c,t,n,s);o[a]=u}return s.delete(e),o}else throw new Error(`Can't recurse into non-iterable type: ${e}`);else return n.set(e,i.value),i.value}function cY(e,t=a0){return o0(e,t)}function o0(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(i.recurse)if(ac(s)){const o=Array.isArray(s)?[]:{};n.add(s);for(const a in s){const c=e.map(p=>p[a]),u=o0(c,t,n);o[a]=u}return n.delete(s),o}else throw new Error(`Can't recurse into non-iterable type: ${s}`);else return i.value}function a0(e){return e===null?null:ac(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}async function c0(e,t){const n=new Map;lm(e,t,n);for(const i of Array.from(n.keys())){const o=n.get(i);if(o instanceof Promise){const a=await o;n.set(i,a)}}const s=lm(e,t,n);return s}function ac(e){return e!=null&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||typeof e=="object"&&!(e instanceof Q))}function lY(e){return e==null||hY(e)||Array.isArray(e)||typeof e=="object"&&e instanceof Q||Ln(e)}function hY(e){return e===null||typeof e!="object"&&typeof e!="function"}function uY(e){return aY(e,dY)}function dY(e){return e instanceof Q?{value:e.clone(),recurse:!1}:ac(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class l0{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,e==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class tS extends 
l0{constructor(){super(tS.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=this.capacity*2,t=new Array(e),n=this.length();for(let s=0;s({value:t++,done:!1}))}function nu(e){return new fY(e)}function u0(e,t){return new p0(e,t)}function tte(e,t,n){return u0(nu(e).take(t),n)}function pY(e,t=Qr.FAIL){return new TY(e,t)}class gn{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new IY(this,e)}filter(e){return new LY(this,e)}map(e){return new SY(this,e)}mapAsync(e){return new d0(this,e)}serialMapAsync(e){return new d0(this,e).serial()}flatmap(e){return new xY(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile(t=>t===!0)}rowMajorBatch(e,t=!0){return new wY(this,e,t)}columnMajorBatch(e,t=!0,n=a0){const s=this.rowMajorBatch(e,t);return s.map(i=>cY(i,n))}concatenate(e,t){return new p0(h0([this,e]),t)}take(e){return e<0||e==null?this:new bY(this,e)}skip(e){return e<0||e==null?this:new yY(this,e)}prefetch(e){return new m0(this,e)}shuffle(e,t){return new AY(this,e,t)}serial(){return new gY(this)}}class mY extends gn{constructor(e){super();this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:uY(e),done:!1}}}class fY extends gn{constructor(e){super();this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class gY extends gn{constructor(e){super();this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class yY extends gn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++<this.maxCount;){const e=await this.upstream.next();if(e.done)return e;qe(e.value)}return this.upstream.next()}}class bY extends gn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0}summary(){return`${this.upstream.summary()} -> Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class wY extends gn{constructor(e,t,n=!0){super();this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){const e=[];for(;e.length<this.batchSize;){const t=await this.upstream.next();if(t.done)return this.enableSmallLastBatch&&e.length>0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class LY extends gn{constructor(e,t){super();this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){const 
e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;qe(e.value)}}}class SY extends gn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Zi(e.value),n=this.transform(e.value),s=Zi(n);for(const i of t)Nd(i,s)||i.dispose();return{value:n,done:!1}}}class IY extends gn{constructor(e,t){super();this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class d0 extends gn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Zi(e.value),n=await this.transform(e.value),s=Zi(n);for(const i of t)Nd(i,s)||i.dispose();return{value:n,done:!1}}}class nS extends gn{constructor(){super();this.outputQueue=new tS,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class xY extends nS{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=Zi(e.value),n=this.transform(e.value),s=Zi(n);this.outputQueue.pushAll(n);for(const i of t)Nd(i,s)||i.dispose();return!0}}class p0 extends gn{constructor(e,t){super();this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){const e="TODO: fill in upstream of chained summaries";return`${e} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,this.iterator==null){const n=await this.moreIterators.next();if(n.done)return{value:null,done:!0};this.iterator=n.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Qr;(function(e){e[e.FAIL=0]="FAIL",e[e.SHORTEST=1]="SHORTEST",e[e.LONGEST=2]="LONGEST"})(Qr||(Qr={}));class TY extends gn{constructor(e,t=Qr.FAIL){super();this.iterators=e,this.mismatchMode=t,this.count=0,this.currentPromise=null}summary(){const e="TODO: fill in upstream of zip summaries";return`{${e}} -> Zip`}async nextState(e){await e;let t=0,n=0;function s(o){if(o instanceof gn){const a=o.next();return{value:a.then(c=>(t++,c.done&&n++,c.value)),recurse:!1}}else return{value:null,recurse:!0}}const i=await c0(this.iterators,s);if(t===n)return{value:null,done:!0};if(n>0)switch(this.mismatchMode){case Qr.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case Qr.SHORTEST:return{value:null,done:!0};case Qr.LONGEST:default:}return this.count++,{value:i,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class m0 extends gn{constructor(e,t){super();this.upstream=e,this.bufferSize=t,this.buffer=new l0(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class AY extends m0{constructor(e,t,n){super(e,t);this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=Ha(n||qn().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(t.done)this.upstreamExhausted=!0;else return this.refill(),t}return{value:null,done:!0}}}class cc{constructor(){this.size=null}batch(e,t=!0){const n=this;k(e>0,()=>`batchSize needs to be positive, but it is - ${e}`);let s;return this.size===Infinity||this.size==null?s=this.size:t?s=Math.ceil(this.size/e):s=Math.floor(this.size/e),hs(async()=>(await n.iterator()).columnMajorBatch(e,t,CY),s)}concatenate(e){const t=this;let n;return this.size===Infinity||e.size===Infinity?n=Infinity:this.size!=null&&e.size!=null?n=this.size+e.size:n=null,hs(async()=>(await t.iterator()).concatenate(await e.iterator()),n)}filter(e){const t=this;let n;return this.size===Infinity?n=Infinity:n=null,hs(async()=>(await t.iterator()).filter(s=>ee(()=>e(s))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return hs(async()=>(await t.iterator()).map(n=>ee(()=>e(n))),this.size)}mapAsync(e){const t=this;return hs(async()=>(await t.iterator()).mapAsync(e),this.size)}prefetch(e){if(e==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return hs(async()=>(await t.iterator()).prefetch(e),this.size)}repeat(e){const t=this;let n;return this.size!=null&&e>0?n=this.size*e:e===0?n=0:this.size!=null&&(e===void 0||e<0)?n=Infinity:n=null,hs(async()=>{const s=nu(async()=>({value:await t.iterator(),done:!1}));return u0(s.take(e))},n)}skip(e){const t=this;let n;return this.size!=null&&e>=0&&this.size>=e?n=this.size-e:this.size!=null&&(this.size(await t.iterator()).skip(e),n)}shuffle(e,t,n=!0){if(e==null||e<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,i=Ha(t||qn().toString());return hs(async()=>{let o=i.int32();return n&&(o+=i.int32()),(await s.iterator()).shuffle(e,o.toString())},this.size)}take(e){const t=this;let n;return this.size!=null&&this.size>e?n=e:this.size!=null&&this.size<=e?n=this.size:n=null,hs(async()=>(await t.iterator()).take(e),n)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}cc.MAX_BUFFER_SIZE=1e4;function hs(e,t=null){return new class extends cc{constructor(){super(...arguments);this.size=t}async iterator(){return e()}}}function vY(e){return hs(async()=>h0(e),e.length)}function NY(e){if(!ac(e))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(e))for(let n=0;n{const n=await c0(e,s=>{if(s instanceof cc)return{value:s.iterator(),recurse:!1};if(ac(s))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return pY(n,Qr.SHORTEST)},t)}function CY(e){if(e===null)return null;const t=e[0];if(lY(t)){const n=RY(e);return{value:n,recurse:!1}}return{value:null,recurse:!0}}function RY(e){if(e.length===0)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof Q?as(e):en(e)}class f0 extends cc{constructor(e){super();this.input=e}async iterator(){const e=await this.input.iterator(),t=e.decodeUTF8(),n=t.split(` -`).map(s=>(s.endsWith("\r")&&(s=s.slice(0,-1)),s));return n}}const hm='"',su=Symbol("out"),g0=Symbol("field"),um=Symbol("quote"),sS=Symbol("quoteafterquote"),y0=Symbol("quoteinquote");class b0 extends cc{constructor(e,t){super();this.input=e,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new f0(e),t||(t={}),this.hasHeader=!(t.hasHeader===!1),this.fullColumnNames=t.columnNames,this.columnConfigs=t.columnConfigs,this.configuredColumnsOnly=t.configuredColumnsOnly,t.delimWhitespace?(k(t.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=t.delimiter?t.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){const e=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!e)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&e&&k(e.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+e.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=e);const t=this.fullColumnNames.reduce((s,i)=>(s[i]=s[i]+1||1,s),{}),n=Object.keys(t).filter(s=>t[s]>1);if(k(n.length===0,()=>"Duplicate column names found: "+n.toString()),this.columnConfigs)for(const s of Object.keys(this.columnConfigs)){const i=this.fullColumnNames.indexOf(s);if(i===-1)throw new Error('The key "'+s+'" provided in columnConfigs does not match any of the 
column names ('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){const e=await this.base.iterator(),t=await e.next();if(t.done)throw new Error("No data was found for CSV parsing.");const n=t.value,s=this.parseRow(n,!1);return s}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let e=await this.base.iterator();return this.hasHeader&&(e=e.skip(1)),e.map(t=>this.makeDataElement(t))}makeDataElement(e){const t=this.parseRow(e),n={},s={};for(let i=0;i14||!Number.isInteger(t))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=e.numFramesPerSpectrogram||43,this.sampleRateHz=e.sampleRateHz,this.columnTruncateLength=e.columnTruncateLength||this.fftSize,this.audioTrackConstraints=e.audioTrackConstraints,this.smoothingTimeConstant=e.smoothingTimeConstant||0,this.includeSpectrogram=!(e.includeSpectrogram===!1),this.includeWaveform=e.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(e={}){if(C().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");const t=new w0(e);return await t.start(),t}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(n){throw new Error(`Error thrown while initializing video stream: ${n.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");const e=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new e,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);const t=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,t.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let e,t;const n=await this.getAudioData();if(this.includeSpectrogram){const s=this.flattenQueue(n.freqDataQueue);e=this.getTensorFromAudioDataArray(s,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){const s=this.flattenQueue(n.timeDataQueue);t=this.getTensorFromAudioDataArray(s,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:e,waveform:t},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){const e=[],t=[];let n=0;return new Promise(s=>{const 
i=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&s({freqDataQueue:e,timeDataQueue:t}),e.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),t.push(this.timeData.slice())),++n===this.numFrames&&(clearInterval(i),s({freqDataQueue:e,timeDataQueue:t}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(e){const t=e[0].length,n=new Float32Array(e.length*t);return e.forEach((s,i)=>n.set(s,i*t)),n}getTensorFromAudioDataArray(e,t){const n=new Float32Array(we(t));return n.set(e,n.length-e.length),en(n,t)}}class L0 extends gn{constructor(e,t){super();if(this.webcamVideoElement=e,this.webcamConfig=t,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=rs([0],"int32"),this.webcamConfig.centerCrop){const n=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,s=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,i=(1-n)/2,o=(1-s)/2,a=i+n,c=s+o;this.cropBox=Gr([o,i,c,a],[1,4])}else this.cropBox=Gr([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(e,t={}){if(C().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!e){if(e=document.createElement("video"),!t.resizeWidth||!t.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");e.width=t.resizeWidth,e.height=t.resizeHeight}const n=new L0(e,t);return await n.start(),n}async start(){this.webcamConfig.facingMode&&k(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. 
Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(e){throw e.message=`Error thrown while initializing video stream: ${e.message}`,e}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(e){console.log(e),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(e=>{this.webcamVideoElement.onloadedmetadata=()=>{e()}})}async next(){if(this.isClosed)return{value:null,done:!0};let e;try{e=BT(this.webcamVideoElement)}catch(t){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(t)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(e),done:!1}}catch(t){throw new Error(`Error thrown cropping the video: ${t.message}`)}finally{e.dispose()}else return{value:e,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(e){return ee(()=>{const t=e.toFloat().expandDims(0);let n;n=Vr.cropAndResize(t,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");const s=n.shape;return n.reshape(s.slice(1))})}async capture(){return(await this.next()).value}stop(){const e=this.stream.getTracks();e.forEach(t=>t.stop());try{this.webcamVideoElement.srcObject=null}catch(t){console.log(t),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class S0{}class I0 extends gn{split(e){return new OY(this,e)}}class OY extends I0{constructor(e,t){super();this.upstream=e,this.impl=new EY(e,t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class EY extends nS{constructor(e,t){super();this.upstream=e,this.separator=t,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){const e=await this.upstream.next();if(e.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);const t=e.value.split(this.separator);t[0]=this.carryover+t[0];for(const n of t.slice(0,-1))this.outputQueue.push(n);return this.carryover=t[t.length-1],!0}}class DY extends gn{decodeUTF8(){return new kY(this)}}class kY extends I0{constructor(e){super();this.upstream=e,this.impl=new FY(e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class FY extends nS{constructor(e){super();if(this.upstream=e,C().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{const{StringDecoder:t}=require("string_decoder");this.decoder=new t("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){const e=await this.upstream.next();let t;if(e.done)return!1;t=e.value;let n;return C().get("IS_BROWSER")?n=this.decoder.decode(t,{stream:!0}):n=this.decoder.write(Buffer.from(t.buffer)),this.outputQueue.push(n),!0}}class x0 extends DY{constructor(e,t={}){super();this.file=e,this.options=t,k(e instanceof Uint8Array||(C().get("IS_BROWSER")?e instanceof File||e instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=t.offset||0,this.chunkSize=t.chunkSize||1024*1024}summary(){return`FileChunks 
${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};const e=new Promise((t,n)=>{const s=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)t(new Uint8Array(this.file.slice(this.offset,s)));else{const i=new FileReader;i.onload=a=>{let c=i.result;if(c instanceof ArrayBuffer&&(c=new Uint8Array(c)),!(c instanceof Uint8Array))return n(new TypeError("FileReader returned unknown type."));t(c)},i.onabort=a=>n(new Error("Aborted")),i.onerror=a=>n(new Error(a.type));const o=this.file.slice(this.offset,s);i.readAsArrayBuffer(o)}this.offset=s});return{value:await e,done:!1}}}async function _Y(e,t={}){let n,s;typeof e=="string"?n=e:(n=e.url,s=WY(e));const i=await uT(n,s);if(i.ok){const o=new Uint8Array(await i.arrayBuffer());return new x0(o,t)}else throw new Error(i.statusText)}const WY=e=>{const t={method:e.method,headers:e.headers,body:e.body,mode:e.mode,credentials:e.credentials,cache:e.cache,redirect:e.redirect,referrer:e.referrer,integrity:e.integrity};return t};function T0(e){return typeof e=="string"&&e.substr(0,7)==="file://"}class A0 extends S0{constructor(e,t={}){super();this.input=e,this.options=t}async iterator(){if(T0(this.input)&&C().get("IS_NODE")){const e=require("fs");this.input=e.readFileSync(this.input.substr(7))}return new x0(this.input,this.options)}}class v0 extends S0{constructor(e,t={}){super();this.url=e,this.fileOptions=t}async iterator(){return T0(this.url)?new A0(this.url,this.fileOptions).iterator():_Y(this.url,this.fileOptions)}}function $Y(e,t={}){return new b0(new v0(e),t)}function UY(e){const t=nu(e);return hs(async()=>t)}function BY(e){return hs(async()=>{const t=await e();return nu(()=>t.next())})}async function MY(e,t){return L0.create(e,t)}async function PY(e){return w0.create(e)}const N0="2.6.0";var zY=Object.freeze({__proto__:null,array:vY,Dataset:cc,zip:NY,CSVDataset:b0,TextLineDataset:f0,csv:$Y,func:UY,generator:BY,microphone:PY,webcam:MY,FileDataSource:A0,URLDataSource:v0,version_data:N0});function Te(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}const GY=wp,VY=rw,HY=ow,YY=aw,qY=dp;function iS(e,t,n,s){if(n==="linear")return e.linear(t);if(n==="relu")return e.relu(t);if(n==="elu")return Oo(t);if(n==="relu6")return e.relu6(t);if(n==="prelu")return e.prelu(t,s);throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}class jY extends f{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new d(this,$s())}write(e,t,n){this.firstUse&&(this.firstUse=!1,C().get("IS_NODE")&&Ja(` + ${s}, and tensor's shape is: ${e.shape}`);const o=s===0?0:e.size/s,a=ee(()=>{const u=[];e=K(e,[1,s,o]);for(let p=0;p{switch(e.op){case"If":case"StatelessIf":{const s=R("thenBranch",e,t,n),i=R("elseBranch",e,t,n),o=R("cond",e,t,n),a=R("args",e,t,n),c=await o.data();return c[0]?n.functionMap[s].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap):n.functionMap[i].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=R("body",e,t,n),i=R("cond",e,t,n),o=R("args",e,t,n),a=await n.functionMap[i].executeFunctionAsync(o,n.tensorArrayMap,n.tensorListMap),c=o.map(m=>m.id);let u=await a[0].data();a.forEach(m=>{!m.kept&&c.indexOf(m.id)===-1&&m.dispose()});let p=o;for(;u[0];){const m=p;p=await n.functionMap[s].executeFunctionAsync(p,n.tensorArrayMap,n.tensorListMap);const 
y=p.map(w=>w.id);m.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()});const b=await n.functionMap[i].executeFunctionAsync(p,n.tensorArrayMap,n.tensorListMap);u=await b[0].data(),b.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()})}return p}case"LoopCond":{const s=R("pred",e,t,n);return[hr(s)]}case"Switch":{const s=R("pred",e,t,n);let i=R("data",e,t,n);return i.kept||(i=hr(i)),(await s.data())[0]?[void 0,i]:[i,void 0]}case"Merge":{const s=e.inputNames.find(i=>Qn(i,t,n)!==void 0);if(s){const i=Qn(s,t,n);return[hr(i)]}return}case"Enter":{const s=R("frameName",e,t,n),i=R("tensor",e,t,n);return n.enterFrame(s),[hr(i)]}case"Exit":{const s=R("tensor",e,t,n);return n.exitFrame(),[hr(s)]}case"NextIteration":{const s=R("tensor",e,t,n);return n.nextIteration(),[hr(s)]}case"TensorArrayV3":{const s=R("size",e,t,n),i=R("dtype",e,t,n),o=R("elementShape",e,t,n),a=R("dynamicSize",e,t,n),c=R("clearAfterRead",e,t,n),u=R("identicalElementShapes",e,t,n),p=R("name",e,t,n),m=new _H(p,i,s,o,u,a,c);return n.addTensorArray(m),[m.idTensor,Ne(1)]}case"TensorArrayWriteV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.write(i,o),[a.idTensor]}case"TensorArrayReadV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=n.getTensorArray(s.id);return[o.read(i)]}case"TensorArrayGatherV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("dtype",e,t,n),a=n.getTensorArray(s.id);return[a.gather(i,o)]}case"TensorArrayScatterV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.scatter(i,o),[a.idTensor]}case"TensorArrayConcatV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id),o=R("dtype",e,t,n);return[i.concat(o)]}case"TensorArraySplitV3":{const s=R("tensorArrayId",e,t,n),i=R("tensor",e,t,n),o=R("lengths",e,t,n),a=n.getTensorArray(s.id);return a.split(o,i),[a.idTensor]}case"TensorArraySizeV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return[Ne(i.size(),"int32")]}case"TensorArrayCloseV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return i.clearAndClose(),[i.idTensor]}case"TensorListSetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorList(s.id);return a.setItem(i,o),[a.idTensor]}case"TensorListGetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.getItem(i,o,a)]}case"TensorListScatterV2":case"TensorListScatter":{const s=R("indices",e,t,n),i=R("tensor",e,t,n),o=R("elementShape",e,t,n),a=R("numElements",e,t,n),c=UH(i,s,o,a);return n.addTensorList(c),[c.idTensor]}case"TensorListReserve":{const s=R("elementShape",e,t,n),i=R("elementDType",e,t,n),o=R("numElements",e,t,n),a=$H(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListGather":{const s=R("tensorListId",e,t,n),i=R("indices",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.gather(i,a,o)]}case"TensorListStack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=R("numElements",e,t,n),c=n.getTensorList(s.id);return[c.stack(i,o,a)]}case"TensorListFromTensor":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=WH(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListConcat":{const 
s=R("tensorListId",e,t,n),i=n.getTensorList(s.id),o=R("dtype",e,t,n),a=R("elementShape",e,t,n);return[i.concat(o,a)]}case"TensorListPushBack":{const s=R("tensorListId",e,t,n),i=R("tensor",e,t,n),o=n.getTensorList(s.id);return o.pushBack(i),[o.idTensor]}case"TensorListPopBack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=n.getTensorList(s.id);return[a.popBack(i,o)]}case"TensorListSplit":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("lengths",e,t,n),a=BH(s,o,i);return n.addTensorList(a),[a.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Mee="control";function QN(e,t,n){const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=s==="fusedbatchnorm",u=R("numArgs",e,t,n);if(o){if(a&&u!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&u!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(c)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const p=R("strides",e,t,n),m=cm(e,t,n),y=R("dataFormat",e,t,n).toUpperCase(),b=R("dilations",e,t,n),[w,I]=R("args",e,t,n);return{stride:p,pad:m,dataFormat:y,dilations:b,biasArg:w,preluArg:I,activationFunc:i}}const PH=(e,t,n)=>{switch(e.op){case"Conv1D":{const s=R("stride",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilation",e,t,n);return[Hd(R("x",e,t,n),R("filter",e,t,n),s,i,o,a)]}case"Conv2D":{const s=R("strides",e,t,n),i=cm(e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[er(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,o,[a[1],a[2]])]}case"_FusedConv2D":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:u,activationFunc:p}=QN(e,t,n);return[Hb({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:p,preluActivationWeights:u})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:u,activationFunc:p}=QN(e,t,n);return[YA({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:p,preluActivationWeights:u})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=R("outputShape",e,t,n),i=R("strides",e,t,n),o=cm(e,t,n);return[Yd(R("x",e,t,n),R("filter",e,t,n),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const s=R("strides",e,t,n),i=cm(e,t,n),o=R("dilations",e,t,n),a=R("dataFormat",e,t,n).toUpperCase();return[Oo(R("input",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,a,[o[1],o[2]])]}case"Conv3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[yb(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2],s[3]],i,o,[a[1],a[2],a[3]])]}case"AvgPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[oh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[mh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n),a=R("includeBatchInIndex",e,t,n),{result:c,indexes:u}=bA(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i,a);return[c,u]}case"AvgPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[pb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{const 
s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[Nb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dilations",e,t,n),a=s[1],c=s[2],u=o[1],p=o[2];return[wb(R("x",e,t,n),R("filter",e,t,n),[a,c],i,[u,p],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Pee="convolution";const zH=(e,t,n)=>{switch(e.op){case"Fill":{const s=R("shape",e,t,n),i=R("dtype",e,t,n),o=R("value",e,t,n);return[hh(s,o,i)]}case"LinSpace":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("num",e,t,n);return[fA(s,i,o)]}case"Multinomial":{const s=R("logits",e,t,n),i=R("numSamples",e,t,n),o=R("seed",e,t,n);return[wA(s,i,o)]}case"OneHot":{const s=R("indices",e,t,n),i=R("depth",e,t,n),o=R("onValue",e,t,n),a=R("offValue",e,t,n);return[vo(s,i,o,a)]}case"Ones":return[si(R("shape",e,t,n),R("dtype",e,t,n))];case"OnesLike":return[Dn(R("x",e,t,n))];case"RandomUniform":return[$o(R("shape",e,t,n),R("minval",e,t,n),R("maxval",e,t,n),R("dtype",e,t,n))];case"Range":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("step",e,t,n);return[yh(s,i,o,R("dtype",e,t,n))]}case"TruncatedNormal":{const s=R("shape",e,t,n),i=R("mean",e,t,n),o=R("stdDev",e,t,n),a=R("seed",e,t,n);return[Ih(s,i,o,R("dtype",e,t,n),a)]}case"Zeros":return[ct(R("shape",e,t,n),R("dtype",e,t,n))];case"ZerosLike":return[et(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},zee="creation";function QL(e,t,n){const s=R("boxes",e,t,n),i=R("scores",e,t,n),o=R("maxOutputSize",e,t,n),a=R("iouThreshold",e,t,n),c=R("scoreThreshold",e,t,n),u=R("softNmsSigma",e,t,n);return{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}}const GH=async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=QL(e,t,n),p=await Vr.nonMaxSuppressionWithScoreAsync(s,i,o,a,c,u);return[p.selectedIndices,p.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=QL(e,t,n),u=R("padToMaxOutputSize",e,t,n),p=await Vr.nonMaxSuppressionPaddedAsync(s,i,o,a,c,u);return[p.selectedIndices,p.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=QL(e,t,n);return[await Vr.nonMaxSuppressionAsync(s,i,o,a,c)]}case"Where":{const s=ve(R("condition",e,t,n),"bool"),i=[await Pb(s)];return s.dispose(),i}case"ListDiff":return SA(R("x",e,t,n),R("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}},Gee="dynamic";const VH=(e,t,n)=>{switch(e.op){case"TopKV2":{const s=R("x",e,t,n),i=R("k",e,t,n),o=R("sorted",e,t,n),a=Bb(s,i,o);return[a.values,a.indices]}case"Unique":{const s=R("x",e,t,n),i=up(s);return[i.values,i.indices]}case"UniqueV2":{const s=R("x",e,t,n),i=R("axis",e,t,n),o=up(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Vee="evaluation";const HH=(e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=R("default",e,t,n);return[Qn(e.name,t,n)||s];case"Placeholder":return[Qn(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{const p=R("x",e,t,n);return[hr(p)]}case"IdentityN":return R("x",e,t,n).map(p=>hr(p));case"Snapshot":const i=R("x",e,t,n);return[hr(i)];case"Shape":return[rs(R("x",e,t,n).shape,"int32")];case"ShapeN":return 
R("x",e,t,n).map(p=>rs(p.shape));case"Size":return[Ne(R("x",e,t,n).size,"int32")];case"Rank":return[Ne(R("x",e,t,n).rank,"int32")];case"NoOp":return[Ne(1)];case"Print":const o=R("x",e,t,n),a=R("data",e,t,n),c=R("message",e,t,n),u=R("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(c);for(let p=0;p{switch(e.op){case"ResizeBilinear":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Vr.resizeBilinear(s,[i[0],i[1]],o)]}case"ResizeNearestNeighbor":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Vr.resizeNearestNeighbor(s,[i[0],i[1]],o)]}case"CropAndResize":{const s=R("image",e,t,n),i=R("boxes",e,t,n),o=R("boxInd",e,t,n),a=R("cropSize",e,t,n),c=R("method",e,t,n),u=R("extrapolationValue",e,t,n);return[Vr.cropAndResize(s,i,o,a,c,u)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Yee="image";const qH=(e,t,n)=>{switch(e.op){case"Equal":return[ni(R("a",e,t,n),R("b",e,t,n))];case"NotEqual":return[Pr(R("a",e,t,n),R("b",e,t,n))];case"Greater":return[Ts(R("a",e,t,n),R("b",e,t,n))];case"GreaterEqual":return[tr(R("a",e,t,n),R("b",e,t,n))];case"Less":return[dh(R("a",e,t,n),R("b",e,t,n))];case"LessEqual":return[Mr(R("a",e,t,n),R("b",e,t,n))];case"LogicalAnd":return[Bs(R("a",e,t,n),R("b",e,t,n))];case"LogicalNot":return[ph(R("a",e,t,n))];case"LogicalOr":return[ep(R("a",e,t,n),R("b",e,t,n))];case"Select":case"SelectV2":return[$n(R("condition",e,t,n),R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},qee="logical";const jH=(e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[at(R("a",e,t,n),R("b",e,t,n),R("transposeA",e,t,n),R("transposeB",e,t,n))];case"Transpose":return[Pe(R("x",e,t,n),R("perm",e,t,n))];case"_FusedMatMul":const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=R("numArgs",e,t,n);if(o){if(a&&c!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&c!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[u,p]=R("args",e,t,n);return[bp({a:R("a",e,t,n),b:R("b",e,t,n),transposeA:R("transposeA",e,t,n),transposeB:R("transposeB",e,t,n),bias:u,activation:i,preluActivationWeights:p})];default:throw TypeError(`Node type ${e.op} is not implemented`)}},jee="matrices";const KH=(e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[Ro(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"FusedBatchNormV3":return[Ro(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"LRN":return[Tb(R("x",e,t,n),R("radius",e,t,n),R("bias",e,t,n),R("alpha",e,t,n),R("beta",e,t,n))];case"Softmax":return[Uo(R("x",e,t,n))];case"LogSoftmax":return[Qd(R("x",e,t,n))];case"SparseToDense":return[zb(R("sparseIndices",e,t,n),R("outputShape",e,t,n),R("sparseValues",e,t,n),R("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Kee="normalization";const XH=(e,t,n)=>{switch(e.op){case"Max":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Xn(R("x",e,t,n),s,i)]}case"Mean":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[zt(R("x",e,t,n),s,i)]}case"Min":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Ha(R("x",e,t,n),s,i)]}case"Sum":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Ue(R("x",e,t,n),s,i)]}case"All":{const 
s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Pd(R("x",e,t,n),s,i)]}case"Any":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[th(R("x",e,t,n),s,i)]}case"ArgMax":{const s=R("axis",e,t,n);return[nh(R("x",e,t,n),s)]}case"ArgMin":{const s=R("axis",e,t,n);return[rb(R("x",e,t,n),s)]}case"Prod":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[sp(R("x",e,t,n),s,i)]}case"Cumsum":{const s=R("axis",e,t,n),i=R("exclusive",e,t,n),o=R("reverse",e,t,n);return[jd(R("x",e,t,n),s,i,o)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Xee="reduction";const JH=(e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=R("n",e,t,n),i=R("axis",e,t,n);let o=R("tensors",e,t,n);return o=o.slice(0,s),[Mt(o,i)]}case"GatherV2":case"Gather":{const s=R("axis",e,t,n),i=R("x",e,t,n),o=R("indices",e,t,n);return[za(i,ve(o,"int32"),s)]}case"ReverseV2":case"Reverse":{const s=R("axis",e,t,n),i=R("x",e,t,n);return[As(i,s)]}case"Slice":{const s=R("begin",e,t,n),i=R("size",e,t,n);return[st(R("x",e,t,n),s,i)]}case"StridedSlice":{const s=R("begin",e,t,n),i=R("end",e,t,n),o=R("strides",e,t,n),a=R("beginMask",e,t,n),c=R("endMask",e,t,n),u=R("ellipsisMask",e,t,n),p=R("newAxisMask",e,t,n),m=R("shrinkAxisMask",e,t,n),y=R("x",e,t,n);return[$b(y,s,i,o,a,c,u,p,m)]}case"Pack":return ee(()=>{const s=R("axis",e,t,n),i=R("tensors",e,t,n),o=i[0].shape,a=zr(i[0]).shape,c=i.map(u=>{const p=ot(u.shape,o);if(!p&&!ot(zr(u).shape,a))throw new Error("the input tensors shape does not match");return p?u:K(u,o)});return[as(c,s)]});case"Unpack":{const s=R("axis",e,t,n),i=R("tensor",e,t,n);return _i(i,s)}case"Tile":{const s=R("reps",e,t,n);return[Br(R("x",e,t,n),s)]}case"Split":case"SplitV":{const s=R("axis",e,t,n),i=R("numOrSizeSplits",e,t,n),o=R("x",e,t,n);return os(o,i,s)}case"ScatterNd":{const s=R("indices",e,t,n),i=R("values",e,t,n),o=R("shape",e,t,n);return[MA(s,i,o)]}case"GatherNd":{const s=R("x",e,t,n),i=R("indices",e,t,n);return[PA(s,i)]}case"SparseToDense":{const s=R("sparseIndices",e,t,n),i=R("outputShape",e,t,n),o=R("sparseValues",e,t,n),a=R("defaultValue",e,t,n);return[zb(s,o,i,o.dtype===a.dtype?a:ve(a,o.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Jee="slice_join";const ZH=(e,t,n)=>{switch(e.op){case"FFT":return[wh(R("x",e,t,n))];case"IFFT":return[ja(R("x",e,t,n))];case"RFFT":return[Lh(R("x",e,t,n))];case"IRFFT":return[hp(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Zee="spectral";const QH=(e,t,n)=>{switch(e.op){case"Cast":return[ve(R("x",e,t,n),R("dtype",e,t,n))];case"ExpandDims":{const s=R("axis",e,t,n);return[Kn(R("x",e,t,n),s)]}case"Squeeze":{const s=R("axis",e,t,n);return[zr(R("x",e,t,n),s)]}case"Reshape":return[K(R("x",e,t,n),R("shape",e,t,n))];case"PadV2":case"Pad":return[ki(R("x",e,t,n),R("padding",e,t,n),R("constantValue",e,t,n))];case"SpaceToBatchND":{const s=R("blockShape",e,t,n),i=R("paddings",e,t,n);return[fh(R("x",e,t,n),s,i)]}case"BatchToSpaceND":{const s=R("blockShape",e,t,n),i=R("crops",e,t,n);return[ah(R("x",e,t,n),s,i)]}case"DepthToSpace":{const s=R("blockSize",e,t,n),i=R("dataFormat",e,t,n).toUpperCase();return[bb(R("x",e,t,n),s,i)]}case"BroadcastTo":return[ch(R("x",e,t,n),R("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Qee="transformation";function e0(e,t,n){const s=((i,o,a)=>{switch(i.category){case"arithmetic":return ee(()=>DH(i,o,a));case"basic_math":return ee(()=>kH(i,o,a));case"control":return MH(i,o,a);case"convolution":return ee(()=>PH(i,o,a));case"creation":return 
ee(()=>zH(i,o,a));case"dynamic":return GH(i,o,a);case"evaluation":return ee(()=>VH(i,o,a));case"image":return ee(()=>YH(i,o,a));case"graph":return ee(()=>HH(i,o,a));case"logical":return ee(()=>qH(i,o,a));case"matrices":return ee(()=>jH(i,o,a));case"normalization":return ee(()=>KH(i,o,a));case"reduction":return ee(()=>XH(i,o,a));case"slice_join":return ee(()=>JH(i,o,a));case"spectral":return ee(()=>ZH(i,o,a));case"transformation":return ee(()=>QH(i,o,a));case"custom":const c=jN(i.op);if(c&&c.customExecutor)return c.customExecutor(new EH(i,o,a));throw TypeError(`Custom op ${i.op} is not registered.`);default:throw TypeError(`Unknown op '${i.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return s instanceof Promise?s.then(i=>[].concat(i)):[].concat(s)}class t0{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;tt.id===0&&t.iterationId===0?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function n0(e,t,n,s){const i=new Set,o=[];let a=null,c=null;const u=new Set,p=Object.keys(e).map(b=>ls(b)[0]);let m=[];s!=null&&(m=s.map(b=>ls(b.name)[0]));const y=[...t];for(;y.length>0;){const b=y.pop();if((s0(b)||sY(b))&&(a==null&&(a=b,c=a.children.map(w=>w.name).filter(w=>i.has(w)))),i.add(b.name),n[b.name]!=null)continue;if(p.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){o.push(b.name);continue}b.inputs.forEach(w=>{if(u.has(w.name))return;u.add(w.name),y.push(w)})}return{inputs:e,outputs:t,usedNodes:i,missingInputs:o,dynamicNode:a,syncInputs:c}}function 
eY(e,t,n){const{usedNodes:s,inputs:i}=n,o=[],a=Object.keys(i).map(m=>ls(m)[0]).map(m=>e.nodes[m]),c=e.initNodes;a.forEach(m=>{s.has(m.name)&&o.push(m)}),e.weights.forEach(m=>{s.has(m.name)&&o.push(m)}),c!=null&&c.forEach(m=>{s.has(m.name)&&o.push(m)});const u=new Set,p=[];for(;o.length>0;){const m=o.pop();u.add(m.name),t[m.name]||p.push(m),m.children.forEach(y=>{!u.has(y.name)&&s.has(y.name)&&y.inputs.every(b=>u.has(b.name))&&o.push(y)})}return p}const tY=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],nY=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"];function s0(e){return tY.indexOf(e.op)>=0}function sY(e){return nY.indexOf(e.op)>=0}class eS{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,e.functions!=null&&Object.keys(e.functions).forEach(n=>{this._functionExecutorMap[n]=new eS(e.functions[n],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map(n=>e[n].map(s=>s.id));this._weightIds=[].concat(...t),this._weightMap=e}get inputs(){return this._inputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(e=>e.signatureKey||e.name)}get outputNodes(){return this._outputs.map(e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t})}get functions(){return Object.keys(this._functions).reduce((e,t)=>(e[t]=this._functions[t].signature,e),{})}getCompilationKey(e,t){const n=e.map(i=>i.name).sort(),s=t.map(i=>i.name).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=n0(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:i,syncInputs:o}=n;if(i!=null)throw new Error(`This execution contains the node '${i.name}', which has the dynamic op '${i.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${o}]`);if(s.length>0){const a=t.map(u=>u.name),c=Object.keys(e);throw new Error(`Cannot compute the outputs [${a}] from the provided inputs [${c}]. 
Missing the following inputs: [${s}]`)}return eY(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map(m=>this.graph.nodes[ls(m)[0]]),i=t.map(m=>ls(m)[0]);let o=i.map(m=>this.graph.nodes[m]);o.length===0&&(o=this._outputs);const a=this.getCompilationKey(s,o);let c=this.compiledMap.get(a);c==null&&(c=this.compile(e,o),this.compiledMap.set(a,c));const u={},p={};return ee(()=>{const m=new t0(this.weightMap,u,p,this.functionExecutorMap),y=Object.assign({},this.weightMap);Object.keys(e).forEach(I=>{const[T,v]=ls(I),N=[];N[v]=e[I],y[T]=N});const b=this.getFrozenTensorIds(y),w={};for(let I=0;IQn(I,y,m))})}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map(n=>e[n]).map(n=>n.map(s=>s.id)));return new Set(t)}checkTensorForDisposal(e,t,n,s,i,o,a){if(t.category==="control"||o.indexOf(e)!==-1)return;n[e].forEach(c=>{c!=null&&(a[c.id]=(a[c.id]||0)+t.children.length)}),t.inputs.forEach(c=>{if(c.category!=="control"){const u=JV(c.name,n,s);u!=null&&u.forEach(p=>{if(p&&!i.has(p.id)){const m=a[p.id];m===1?(p.dispose(),delete a[p.id]):m!=null&&a[p.id]--}})}})}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},i={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const o=new t0(this.weightMap,s,i,this.functionExecutorMap),a=await this.executeWithControlFlow(e,o,t,n),c=t.map(y=>Qn(y,a,o)),u=c.map(y=>y.id),p=Object.keys(e).map(y=>e[y].id),m=new Set([...u,...p,...this.weightIds]);return Object.keys(a).forEach(y=>{const b=a[y];b.forEach(w=>{w&&!w.isDisposed&&!m.has(w.id)&&w.dispose()})}),this.parent==null&&o.dispose(m),c}async executeFunctionAsync(e,t,n){const s=e.reduce((i,o,a)=>(i[this.inputs[a].name]=o,i),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const i=Object.keys(e),o=i.map(E=>this.graph.nodes[ls(E)[0]]),a=n.map(E=>ls(E)[0]),c=a.map(E=>this.graph.nodes[E]),{usedNodes:u,missingInputs:p,dynamicNode:m,syncInputs:y}=n0(e,c,this.weightMap),b=[...o,...this.graph.weights].map(E=>({node:E,contexts:t.currentContext})),w=Object.assign({},this.weightMap);Object.keys(e).forEach(E=>{const[D,F]=ls(E),_=[];_[F]=e[E],w[D]=_});const I={},T=this.getFrozenTensorIds(w),v={};for(;b.length>0;){const E=this.processStack(o,b,t,w,v,T,a,I,u);await Promise.all(E)}m==null&&!s&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");const N=c.filter(E=>!s0(E)&&!Qn(E.name,w,t)).map(E=>E.name);if(N.length>0){let E="";throw m!=null&&(E=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${y}]`),new Error(`Cannot compute the outputs [${N}] from the provided inputs [${i}]. Consider providing the following inputs: [${p}]. 
${E}`)}return w}processStack(e,t,n,s,i,o,a,c,u){const p=[];for(;t.length>0;){const m=t.pop();n.currentContext=m.contexts;let y="";if(m.node.op==="Enter"&&R("isConstant",m.node,s,n)&&([y]=lr(m.node.name,n)),e.indexOf(m.node)===-1){const b=e0(m.node,s,n);y||([y]=lr(m.node.name,n));const w=n.currentContext;b instanceof Promise?p.push(b.then(I=>(s[y]=I,n.currentContext=w,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,u),I))):(s[y]=b,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,u))}else this.processChildNodes(m.node,t,n,s,i,u)}return p}processChildNodes(e,t,n,s,i,o){e.children.forEach(a=>{const[c]=lr(a.name,n);if(i[c]||!o.has(a.name))return;a.op==="Merge"?a.inputNames.some(u=>!!Qn(u,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a})):a.inputNames.every(u=>!!Qn(u,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a}))})}dispose(){Object.keys(this.weightMap).forEach(e=>this.weightMap[e].forEach(t=>t.dispose()))}checkInputShapeAndType(e){Object.keys(e).forEach(t=>{const n=e[t],[s]=ls(t),i=this.graph.nodes[s];if(i.attrParams.shape&&i.attrParams.shape.value){const o=i.attrParams.shape.value,a=o.length===n.shape.length&&n.shape.every((c,u)=>o[u]===-1||o[u]===c);k(a,()=>`The shape of dict['${i.name}'] provided in model.execute(dict) must be [${o}], but was [${n.shape}]`)}i.attrParams.dtype&&i.attrParams.dtype.value&&k(n.dtype===i.attrParams.dtype.value,()=>`The dtype of dict['${i.name}'] provided in model.execute(dict) must be ${i.attrParams.dtype.value}, but was ${n.dtype}`)})}mapInputs(e){const t={};for(const n in e)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[n]!=null){const s=this._signature.inputs[n];t[s.name]=e[n]}else t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter(n=>{const[s]=ls(n);return this.graph.nodes[s]==null});if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map(t=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[t]!=null){const n=this._signature.outputs[t];return n.name}return t},{})}checkOutputs(e){e.forEach(t=>{const[n]=ls(t);if(!this.graph.nodes[n])throw new Error(`The output '${t}' is not found in the graph`)})}}const iY="?tfjs-format=file",rY="model.json";class i0{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",t==null&&(this.loadOptions={})}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){const e=this.modelUrl;if(e.load!=null)this.handler=e;else if(this.loadOptions.requestInit!=null)this.handler=kd(e,this.loadOptions);else{const t=By(e,this.loadOptions);if(t.length===0)t.push(kd(e,this.loadOptions));else if(t.length>1)throw new Error(`Found more than one (${t.length}) load handlers for URL '${[e]}'`);this.handler=t[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let 
n={};this.artifacts.userDefinedMetadata!=null&&(n=this.artifacts.userDefinedMetadata.signature),this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=Rd(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new eS(KN.Instance.transformGraph(t,n)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),e.modelInitializer!=null){const i=KN.Instance.transformGraph(e.modelInitializer);this.initializer=new eS(i),this.initializer.weightMap=this.executor.weightMap,this.initializer.execute({},[])}return!0}async save(e,t){if(typeof e=="string"){const n=Uy(e);if(n.length===0)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(n.length>1)throw new Error(`Found more than one (${n.length}) save handlers for URL '${e}'`);e=n[0]}if(e.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof Q)&&!Array.isArray(e))return e;if(e=Array.isArray(e)?e:[e],e.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce((t,n,s)=>(t[n]=e[s],t),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce((t,n)=>(t[n]=[e[n]],t),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose()}}async function oY(e,t={}){if(e==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(e.load==null&&(e.endsWith("/")||(e=e+"/"),e=`${e}${rY}${iY}`));const n=new i0(e,t);return await n.load(),n}const r0="2.6.0";function aY(e,t){return lm(e,t)}function lm(e,t,n=new Map,s=new Set){if(e==null)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(i.recurse)if(lc(e)){const o=Array.isArray(e)?[]:{};s.add(e);for(const a in e){const c=e[a],u=lm(c,t,n,s);o[a]=u}return s.delete(e),o}else throw new Error(`Can't recurse into non-iterable type: ${e}`);else return n.set(e,i.value),i.value}function cY(e,t=a0){return o0(e,t)}function o0(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(i.recurse)if(lc(s)){const o=Array.isArray(s)?[]:{};n.add(s);for(const a in s){const c=e.map(p=>p[a]),u=o0(c,t,n);o[a]=u}return n.delete(s),o}else throw new Error(`Can't recurse into non-iterable type: ${s}`);else return i.value}function a0(e){return e===null?null:lc(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}async function c0(e,t){const n=new Map;lm(e,t,n);for(const i of Array.from(n.keys())){const o=n.get(i);if(o instanceof Promise){const a=await o;n.set(i,a)}}const s=lm(e,t,n);return s}function lc(e){return e!=null&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||typeof e=="object"&&!(e instanceof Q))}function lY(e){return e==null||hY(e)||Array.isArray(e)||typeof e=="object"&&e instanceof Q||Ln(e)}function hY(e){return e===null||typeof e!="object"&&typeof e!="function"}function uY(e){return aY(e,dY)}function dY(e){return e instanceof Q?{value:e.clone(),recurse:!1}:lc(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class l0{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,e==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class tS extends 
l0{constructor(){super(tS.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=this.capacity*2,t=new Array(e),n=this.length();for(let s=0;s({value:t++,done:!1}))}function nu(e){return new fY(e)}function u0(e,t){return new p0(e,t)}function tte(e,t,n){return u0(nu(e).take(t),n)}function pY(e,t=Qr.FAIL){return new TY(e,t)}class gn{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new IY(this,e)}filter(e){return new LY(this,e)}map(e){return new SY(this,e)}mapAsync(e){return new d0(this,e)}serialMapAsync(e){return new d0(this,e).serial()}flatmap(e){return new xY(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile(t=>t===!0)}rowMajorBatch(e,t=!0){return new wY(this,e,t)}columnMajorBatch(e,t=!0,n=a0){const s=this.rowMajorBatch(e,t);return s.map(i=>cY(i,n))}concatenate(e,t){return new p0(h0([this,e]),t)}take(e){return e<0||e==null?this:new bY(this,e)}skip(e){return e<0||e==null?this:new yY(this,e)}prefetch(e){return new m0(this,e)}shuffle(e,t){return new AY(this,e,t)}serial(){return new gY(this)}}class mY extends gn{constructor(e){super();this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:uY(e),done:!1}}}class fY extends gn{constructor(e){super();this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class gY extends gn{constructor(e){super();this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class yY extends gn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class wY extends gn{constructor(e,t,n=!0){super();this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){const e=[];for(;e.length0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class LY extends gn{constructor(e,t){super();this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){const 
e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;qe(e.value)}}}class SY extends gn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Zi(e.value),n=this.transform(e.value),s=Zi(n);for(const i of t)Nd(i,s)||i.dispose();return{value:n,done:!1}}}class IY extends gn{constructor(e,t){super();this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class d0 extends gn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Zi(e.value),n=await this.transform(e.value),s=Zi(n);for(const i of t)Nd(i,s)||i.dispose();return{value:n,done:!1}}}class nS extends gn{constructor(){super();this.outputQueue=new tS,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class xY extends nS{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=Zi(e.value),n=this.transform(e.value),s=Zi(n);this.outputQueue.pushAll(n);for(const i of t)Nd(i,s)||i.dispose();return!0}}class p0 extends gn{constructor(e,t){super();this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){const e="TODO: fill in upstream of chained summaries";return`${e} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,this.iterator==null){const n=await this.moreIterators.next();if(n.done)return{value:null,done:!0};this.iterator=n.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Qr;(function(e){e[e.FAIL=0]="FAIL",e[e.SHORTEST=1]="SHORTEST",e[e.LONGEST=2]="LONGEST"})(Qr||(Qr={}));class TY extends gn{constructor(e,t=Qr.FAIL){super();this.iterators=e,this.mismatchMode=t,this.count=0,this.currentPromise=null}summary(){const e="TODO: fill in upstream of zip summaries";return`{${e}} -> Zip`}async nextState(e){await e;let t=0,n=0;function s(o){if(o instanceof gn){const a=o.next();return{value:a.then(c=>(t++,c.done&&n++,c.value)),recurse:!1}}else return{value:null,recurse:!0}}const i=await c0(this.iterators,s);if(t===n)return{value:null,done:!0};if(n>0)switch(this.mismatchMode){case Qr.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case Qr.SHORTEST:return{value:null,done:!0};case Qr.LONGEST:default:}return this.count++,{value:i,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class m0 extends gn{constructor(e,t){super();this.upstream=e,this.bufferSize=t,this.buffer=new l0(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class AY extends m0{constructor(e,t,n){super(e,t);this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=qa(n||qn().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(t.done)this.upstreamExhausted=!0;else return this.refill(),t}return{value:null,done:!0}}}class hc{constructor(){this.size=null}batch(e,t=!0){const n=this;k(e>0,()=>`batchSize needs to be positive, but it is + ${e}`);let s;return this.size===Infinity||this.size==null?s=this.size:t?s=Math.ceil(this.size/e):s=Math.floor(this.size/e),hs(async()=>(await n.iterator()).columnMajorBatch(e,t,CY),s)}concatenate(e){const t=this;let n;return this.size===Infinity||e.size===Infinity?n=Infinity:this.size!=null&&e.size!=null?n=this.size+e.size:n=null,hs(async()=>(await t.iterator()).concatenate(await e.iterator()),n)}filter(e){const t=this;let n;return this.size===Infinity?n=Infinity:n=null,hs(async()=>(await t.iterator()).filter(s=>ee(()=>e(s))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return hs(async()=>(await t.iterator()).map(n=>ee(()=>e(n))),this.size)}mapAsync(e){const t=this;return hs(async()=>(await t.iterator()).mapAsync(e),this.size)}prefetch(e){if(e==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return hs(async()=>(await t.iterator()).prefetch(e),this.size)}repeat(e){const t=this;let n;return this.size!=null&&e>0?n=this.size*e:e===0?n=0:this.size!=null&&(e===void 0||e<0)?n=Infinity:n=null,hs(async()=>{const s=nu(async()=>({value:await t.iterator(),done:!1}));return u0(s.take(e))},n)}skip(e){const t=this;let n;return this.size!=null&&e>=0&&this.size>=e?n=this.size-e:this.size!=null&&(this.size(await t.iterator()).skip(e),n)}shuffle(e,t,n=!0){if(e==null||e<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,i=qa(t||qn().toString());return hs(async()=>{let o=i.int32();return n&&(o+=i.int32()),(await s.iterator()).shuffle(e,o.toString())},this.size)}take(e){const t=this;let n;return this.size!=null&&this.size>e?n=e:this.size!=null&&this.size<=e?n=this.size:n=null,hs(async()=>(await t.iterator()).take(e),n)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}hc.MAX_BUFFER_SIZE=1e4;function hs(e,t=null){return new class extends hc{constructor(){super(...arguments);this.size=t}async iterator(){return e()}}}function vY(e){return hs(async()=>h0(e),e.length)}function NY(e){if(!lc(e))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(e))for(let n=0;n{const n=await c0(e,s=>{if(s instanceof hc)return{value:s.iterator(),recurse:!1};if(lc(s))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return pY(n,Qr.SHORTEST)},t)}function CY(e){if(e===null)return null;const t=e[0];if(lY(t)){const n=RY(e);return{value:n,recurse:!1}}return{value:null,recurse:!0}}function RY(e){if(e.length===0)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof Q?as(e):en(e)}class f0 extends hc{constructor(e){super();this.input=e}async iterator(){const e=await this.input.iterator(),t=e.decodeUTF8(),n=t.split(` +`).map(s=>(s.endsWith("\r")&&(s=s.slice(0,-1)),s));return n}}const hm='"',su=Symbol("out"),g0=Symbol("field"),um=Symbol("quote"),sS=Symbol("quoteafterquote"),y0=Symbol("quoteinquote");class b0 extends hc{constructor(e,t){super();this.input=e,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new f0(e),t||(t={}),this.hasHeader=!(t.hasHeader===!1),this.fullColumnNames=t.columnNames,this.columnConfigs=t.columnConfigs,this.configuredColumnsOnly=t.configuredColumnsOnly,t.delimWhitespace?(k(t.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=t.delimiter?t.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){const e=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!e)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&e&&k(e.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+e.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=e);const t=this.fullColumnNames.reduce((s,i)=>(s[i]=s[i]+1||1,s),{}),n=Object.keys(t).filter(s=>t[s]>1);if(k(n.length===0,()=>"Duplicate column names found: "+n.toString()),this.columnConfigs)for(const s of Object.keys(this.columnConfigs)){const i=this.fullColumnNames.indexOf(s);if(i===-1)throw new Error('The key "'+s+'" provided in columnConfigs does not match any of the 
column names ('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){const e=await this.base.iterator(),t=await e.next();if(t.done)throw new Error("No data was found for CSV parsing.");const n=t.value,s=this.parseRow(n,!1);return s}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let e=await this.base.iterator();return this.hasHeader&&(e=e.skip(1)),e.map(t=>this.makeDataElement(t))}makeDataElement(e){const t=this.parseRow(e),n={},s={};for(let i=0;i14||!Number.isInteger(t))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=e.numFramesPerSpectrogram||43,this.sampleRateHz=e.sampleRateHz,this.columnTruncateLength=e.columnTruncateLength||this.fftSize,this.audioTrackConstraints=e.audioTrackConstraints,this.smoothingTimeConstant=e.smoothingTimeConstant||0,this.includeSpectrogram=!(e.includeSpectrogram===!1),this.includeWaveform=e.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(e={}){if(C().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");const t=new w0(e);return await t.start(),t}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(n){throw new Error(`Error thrown while initializing video stream: ${n.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");const e=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new e,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);const t=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,t.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let e,t;const n=await this.getAudioData();if(this.includeSpectrogram){const s=this.flattenQueue(n.freqDataQueue);e=this.getTensorFromAudioDataArray(s,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){const s=this.flattenQueue(n.timeDataQueue);t=this.getTensorFromAudioDataArray(s,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:e,waveform:t},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){const e=[],t=[];let n=0;return new Promise(s=>{const 
i=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&s({freqDataQueue:e,timeDataQueue:t}),e.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),t.push(this.timeData.slice())),++n===this.numFrames&&(clearInterval(i),s({freqDataQueue:e,timeDataQueue:t}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(e){const t=e[0].length,n=new Float32Array(e.length*t);return e.forEach((s,i)=>n.set(s,i*t)),n}getTensorFromAudioDataArray(e,t){const n=new Float32Array(we(t));return n.set(e,n.length-e.length),en(n,t)}}class L0 extends gn{constructor(e,t){super();if(this.webcamVideoElement=e,this.webcamConfig=t,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=rs([0],"int32"),this.webcamConfig.centerCrop){const n=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,s=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,i=(1-n)/2,o=(1-s)/2,a=i+n,c=s+o;this.cropBox=Gr([o,i,c,a],[1,4])}else this.cropBox=Gr([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(e,t={}){if(C().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!e){if(e=document.createElement("video"),!t.resizeWidth||!t.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");e.width=t.resizeWidth,e.height=t.resizeHeight}const n=new L0(e,t);return await n.start(),n}async start(){this.webcamConfig.facingMode&&k(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. 
Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(e){throw e.message=`Error thrown while initializing video stream: ${e.message}`,e}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(e){console.log(e),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(e=>{this.webcamVideoElement.onloadedmetadata=()=>{e()}})}async next(){if(this.isClosed)return{value:null,done:!0};let e;try{e=BT(this.webcamVideoElement)}catch(t){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(t)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(e),done:!1}}catch(t){throw new Error(`Error thrown cropping the video: ${t.message}`)}finally{e.dispose()}else return{value:e,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(e){return ee(()=>{const t=e.toFloat().expandDims(0);let n;n=Vr.cropAndResize(t,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");const s=n.shape;return n.reshape(s.slice(1))})}async capture(){return(await this.next()).value}stop(){const e=this.stream.getTracks();e.forEach(t=>t.stop());try{this.webcamVideoElement.srcObject=null}catch(t){console.log(t),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class S0{}class I0 extends gn{split(e){return new OY(this,e)}}class OY extends I0{constructor(e,t){super();this.upstream=e,this.impl=new EY(e,t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class EY extends nS{constructor(e,t){super();this.upstream=e,this.separator=t,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){const e=await this.upstream.next();if(e.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);const t=e.value.split(this.separator);t[0]=this.carryover+t[0];for(const n of t.slice(0,-1))this.outputQueue.push(n);return this.carryover=t[t.length-1],!0}}class DY extends gn{decodeUTF8(){return new kY(this)}}class kY extends I0{constructor(e){super();this.upstream=e,this.impl=new FY(e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class FY extends nS{constructor(e){super();if(this.upstream=e,C().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{const{StringDecoder:t}=require("string_decoder");this.decoder=new t("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){const e=await this.upstream.next();let t;if(e.done)return!1;t=e.value;let n;return C().get("IS_BROWSER")?n=this.decoder.decode(t,{stream:!0}):n=this.decoder.write(Buffer.from(t.buffer)),this.outputQueue.push(n),!0}}class x0 extends DY{constructor(e,t={}){super();this.file=e,this.options=t,k(e instanceof Uint8Array||(C().get("IS_BROWSER")?e instanceof File||e instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=t.offset||0,this.chunkSize=t.chunkSize||1024*1024}summary(){return`FileChunks 
${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};const e=new Promise((t,n)=>{const s=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)t(new Uint8Array(this.file.slice(this.offset,s)));else{const i=new FileReader;i.onload=a=>{let c=i.result;if(c instanceof ArrayBuffer&&(c=new Uint8Array(c)),!(c instanceof Uint8Array))return n(new TypeError("FileReader returned unknown type."));t(c)},i.onabort=a=>n(new Error("Aborted")),i.onerror=a=>n(new Error(a.type));const o=this.file.slice(this.offset,s);i.readAsArrayBuffer(o)}this.offset=s});return{value:await e,done:!1}}}async function _Y(e,t={}){let n,s;typeof e=="string"?n=e:(n=e.url,s=WY(e));const i=await uT(n,s);if(i.ok){const o=new Uint8Array(await i.arrayBuffer());return new x0(o,t)}else throw new Error(i.statusText)}const WY=e=>{const t={method:e.method,headers:e.headers,body:e.body,mode:e.mode,credentials:e.credentials,cache:e.cache,redirect:e.redirect,referrer:e.referrer,integrity:e.integrity};return t};function T0(e){return typeof e=="string"&&e.substr(0,7)==="file://"}class A0 extends S0{constructor(e,t={}){super();this.input=e,this.options=t}async iterator(){if(T0(this.input)&&C().get("IS_NODE")){const e=require("fs");this.input=e.readFileSync(this.input.substr(7))}return new x0(this.input,this.options)}}class v0 extends S0{constructor(e,t={}){super();this.url=e,this.fileOptions=t}async iterator(){return T0(this.url)?new A0(this.url,this.fileOptions).iterator():_Y(this.url,this.fileOptions)}}function $Y(e,t={}){return new b0(new v0(e),t)}function UY(e){const t=nu(e);return hs(async()=>t)}function BY(e){return hs(async()=>{const t=await e();return nu(()=>t.next())})}async function MY(e,t){return L0.create(e,t)}async function PY(e){return w0.create(e)}const N0="2.6.0";var zY=Object.freeze({__proto__:null,array:vY,Dataset:hc,zip:NY,CSVDataset:b0,TextLineDataset:f0,csv:$Y,func:UY,generator:BY,microphone:PY,webcam:MY,FileDataSource:A0,URLDataSource:v0,version_data:N0});function Te(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}const GY=wp,VY=rw,HY=ow,YY=aw,qY=dp;function iS(e,t,n,s){if(n==="linear")return e.linear(t);if(n==="relu")return e.relu(t);if(n==="elu")return Do(t);if(n==="relu6")return e.relu6(t);if(n==="prelu")return e.prelu(t,s);throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}class jY extends f{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new d(this,$s())}write(e,t,n){this.firstUse&&(this.firstUse=!1,C().get("IS_NODE")&&Qa(` ============================ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, which binds to TensorFlow C++, by running npm i @tensorflow/tfjs-node, or npm i @tensorflow/tfjs-node-gpu if you have CUDA. Then call require('@tensorflow/tfjs-node'); (-gpu suffix for CUDA) at the start of your program. Visit https://github.com/tensorflow/tfjs-node for more details. 
-============================`));const s={};return this.data.set(s,{values:e,dtype:n,refCount:1}),s}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return{dataId:s,shape:e,dtype:t}}incRef(e){const t=this.data.get(e);t.refCount++}decRef(e){if(this.data.has(e)){const t=this.data.get(e);t.refCount--}}move(e,t,n,s){this.data.set(e,{values:t,dtype:s,refCount:1})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:t,complexTensorInfos:n}=this.data.get(e);if(t==="complex64"){const s=this.readSync(n.real.dataId),i=this.readSync(n.imag.dataId);return ir(s,i)}return this.data.get(e).values}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if(e.dtype==="string")try{n=t.map(s=>Yl(s))}catch(s){throw new Error("Failed to decode encoded string bytes into utf-8")}return Qe(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return $s().makeTensorFromDataId(s,t,n,this)}disposeData(e){if(this.data.has(e)){const{complexTensorInfos:t}=this.data.get(e);t!=null&&(this.disposeData(t.real.dataId),this.disposeData(t.imag.dataId)),this.data.delete(e)}}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.data.has(t)){const n=this.data.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}async time(e){const t=qn();e();const n=qn()-t;return{kernelMs:n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(e,t,n,s){Te(e,"stridedSlice");const i=Wd(t,n,s);if(i.some(c=>c===0))return en([],i);const o=Qe(i,e.dtype),a=this.bufferSync(e);for(let c=0;ca[c]=e.shape[c]-1-a[c]),n.set(s.get(...a),...o)}return n.toTensor()}neg(e){return Te(e,"neg"),X(Ne(-1),e)}addN(e){Te(e,"addN");const t=e.map(i=>this.readSync(i.dataId)),n=Qe(e[0].shape,e[0].dtype),s=n.values;for(let i=0;iMath.pow(n,s))}batchMatMul(e,t,n,s){Te([e,t],"matMul");const i=n?e.shape[1]:e.shape[2],o=n?e.shape[2]:e.shape[1],a=s?t.shape[1]:t.shape[2],c=e.shape[0],u=this.readSync(e.dataId),p=this.readSync(t.dataId),[m,y,b]=n?[e.strides[0],1,e.strides[1]]:[e.strides[0],e.strides[1],1],[w,I,T]=s?[1,t.strides[1],t.strides[0]]:[t.strides[1],1,t.strides[0]],v=o*a,N=Qe([c,o,a],e.dtype),E=N.values,D=this.blockSize;for(let F=0;FMath.floor(i/o),s="int32";return this.broadcastedBinaryOp(e,t,s,n)}sum(e,t){Te(e,"sum"),ss("sum",t,e.rank);const[n,s]=On(e.shape,t),i=Cn(e.dtype,"int32"),o=ct(n,i),a=we(s),c=this.readSync(o.dataId),u=this.readSync(e.dataId);for(let p=0;py&&(y=I,b=w)}c[p]=b}return o}cumsum(e,t,n,s){if(Te(e,"cumsum"),t!==e.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=Cn(e.dtype,"int32"),o=ct(e.shape,i),a=this.readSync(o.dataId),c=this.readSync(e.dataId),u=e.shape[e.rank-1],p=s?(m,y)=>m+u-y-1:(m,y)=>m+y;for(let m=0;mn===s?1:0)}notEqual(e,t){return Te([e,t],"notEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n!==s?1:0)}less(e,t){return Te([e,t],"less"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>nn<=s?1:0)}greater(e,t){return Te([e,t],"greater"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>s?1:0)}greaterEqual(e,t){return Te([e,t],"greaterEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>=s?1:0)}logicalAnd(e,t){return Te([e,t],"logicalAnd"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n&&s)}logicalOr(e,t){return Te([e,t],"logicalOr"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n||s)}select(e,t,n){Te([e,t,n],"select");const 
s=this.readSync(e.dataId),i=this.readSync(t.dataId),o=this.readSync(n.dataId),a=ct(t.shape,Cn(t.dtype,n.dtype)),c=this.readSync(a.dataId);let u=0;const p=e.rank===0||e.rank>1||t.rank===1?1:we(t.shape.slice(1));for(let m=0;mMath.min(n,s))}mod(e,t){return Te([e,t],"mod"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>{const i=n%s;return n<0&&s<0||n>=0&&s>=0?i:(i+s)%s})}maximum(e,t){return Te([e,t],"maximum"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.max(n,s))}all(e,t){Te(e,"all"),ss("all",t,e.rank);const[n,s]=On(e.shape,t),i=ct(n,e.dtype),o=we(s),a=this.readSync(i.dataId),c=this.readSync(e.dataId);for(let u=0;u{const i=n-s;return i*i})}linear(e){return e}relu(e){Te(e,"relu");const t=ct(e.shape,e.dtype),n=this.readSync(t.dataId),s=this.readSync(e.dataId);for(let i=0;in<0?s*n:n)}eluDer(e,t){Te([e,t],"eluDer");const n=new Float32Array(t.size),s=this.readSync(t.dataId),i=this.readSync(e.dataId);for(let o=0;o=1?n[o]=i[o]:n[o]=i[o]*(a+1)}return this.makeOutput(n,t.shape,"float32")}atan2(e,t){return Te([e,t],"atan2"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.atan2(n,s))}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){let a=this.conv2d(e,t,n);return s&&(a=be(a,s)),i&&(a=iS(this,a,i,o)),a}conv2d(e,t,n){Te([e,t],"conv2d");const s=n.filterHeight,i=n.filterWidth,o=n.dilationHeight,a=n.dilationWidth,c=n.padInfo.left,u=n.padInfo.top,p=n.dataFormat==="channelsLast",m=Qe(n.outShape,e.dtype),y=e.strides[0],b=p?e.strides[1]:e.strides[2],w=p?e.strides[2]:1,I=p?1:e.strides[1],T=m.strides[0],v=p?m.strides[1]:m.strides[2],N=p?m.strides[2]:1,E=p?1:m.strides[1],D=this.readSync(e.dataId),F=this.readSync(t.dataId),_=m.values;for(let B=0;B=n.inHeight)continue;const he=ce*t.strides[0],pe=U+ue*b;for(let le=0;le=n.inWidth)continue;const Ee=he+Ie*t.strides[1],We=pe+Se*w;let Oe=Ee;for(let $e=0;$e=n.inDepth)continue;const Y=B*t.strides[0],q=N+U*e.strides[1];for(let J=0;J=n.inHeight)continue;const pe=Y+ue*t.strides[1],le=q+he*e.strides[2];for(let ye=0;ye=n.inWidth)continue;const We=pe+Se*t.strides[2],Oe=le+Ee*n.inChannels;let $e=We;for(let He=0;He=n.inHeight)continue;const B=F*t.strides[0],U=T+_*e.strides[1];for(let Y=0;Y=n.inWidth)continue;const ue=B+oe*t.strides[1],he=U+ce*n.inChannels;let pe=q,le=ue;for(let ye=0;yep*m),i=Rh(e.shape,t,s),o=Oh(i.length,t.length),a=Eh(e.shape,t,s),c=jb(n,t.length),u=Kb(a,n,t.length);return Pe(e.reshape(i),o).reshape(a).slice(c,u)}pool3d(e,t,n){Te(e,"pool3d");const s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,u=t.dilationWidth,p=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,I=t.padInfo.left,T=n==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,v=this.readSync(e.dataId),N=Qe(t.outShape,e.dtype),E=N.values,D=t.outShape[1]*t.outShape[2]*t.outShape[3]*t.outShape[4],F=t.outShape[2]*t.outShape[3]*t.outShape[4],_=t.outShape[3]*t.outShape[4],B=t.outShape[4];for(let U=0;Utt?tt=Hs:n==="avg"&&(bt+=Hs,Jt++),isNaN(tt))break}if(isNaN(tt))break}if(isNaN(tt))break}const un=He+J;E[un]=n==="avg"?bt/Jt:tt}}}}return N.toTensor()}avgPool3d(e,t){return Te(e,"avgPool3d"),this.pool3d(e,t,"avg").toFloat()}avgPool3dBackprop(e,t,n){Te([e,t],"avgPool3dBackprop");const 
s=n.strideDepth,i=n.strideHeight,o=n.strideWidth,a=n.filterDepth,c=n.filterHeight,u=n.filterWidth,p=n.dilationDepth,m=n.dilationHeight,y=n.dilationWidth,b=n.effectiveFilterDepth,w=n.effectiveFilterHeight,I=n.effectiveFilterWidth,T=b-1-n.padInfo.front,v=I-1-n.padInfo.left,N=w-1-n.padInfo.top,E=Qe(t.shape,"float32"),D=1/(a*c*u),F=this.bufferSync(e);for(let _=0;_=n.outDepth||Math.floor(pe)!==pe)continue;for(let le=0;le=n.outHeight||Math.floor(ye)!==ye)continue;for(let me=0;me=n.outWidth||Math.floor(Ie)!==Ie)continue;const Se=F.get(_,pe,ye,Ie,B);ue+=Se}}}E.set(ue*D,_,U,Y,q,B)}return E.toTensor()}maxPool3d(e,t){return Te(e,"maxPool3d"),this.pool3d(e,t,"max").toFloat()}maxPool3dPositions(e,t){const n=Qe(t.outShape,"int32"),s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,u=t.dilationWidth,p=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,I=t.padInfo.left,T=this.bufferSync(e);for(let v=0;v=he&&(he=We,pe=ye*m*y+Ie*m+Ee)}}}n.set(pe,v,E,B,J,N)}}}return n.toTensor()}maxPool3dBackprop(e,t,n,s){Te([t,n],"maxPool3dBackprop");const i=this.maxPool3dPositions(t,s),o=s.strideDepth,a=s.strideHeight,c=s.strideWidth,u=s.dilationDepth,p=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterDepth,b=s.effectiveFilterHeight,w=s.effectiveFilterWidth,I=y-1-s.padInfo.front,T=w-1-s.padInfo.left,v=b-1-s.padInfo.top,N=Qe(t.shape,"float32"),E=this.bufferSync(i),D=this.bufferSync(e);for(let F=0;F=s.outDepth||Math.floor(he)!==he)continue;for(let pe=0;pe=s.outHeight||Math.floor(le)!==le)continue;for(let ye=0;ye=s.outWidth||Math.floor(me)!==me)continue;const Ie=y*b*w-1-E.get(F,he,le,me,_),Se=ue*b*w+pe*w+ye,Ee=Ie===Se?1:0;if(Ee===0)continue;const We=D.get(F,he,le,me,_);ce+=We*Ee}}}N.set(ce,F,B,U,Y,_)}return N.toTensor()}resizeBilinear(e,t,n,s){Te(e,"resizeBilinear");const[i,o,a,c]=e.shape,u=this.readSync(e.dataId),p=new Float32Array(we([i,t,n,c])),m=[s&&t>1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n];let b=0;const w=m[0]/y[0],I=m[1]/y[1];for(let T=0;T1?i-1:i,n&&u>1?o-1:o],y=[n&&c>1?c-1:c,n&&u>1?u-1:u],b=m[0]/y[0],w=m[1]/y[1],I=this.readSync(e.dataId);let T=0;for(let v=0;v1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n],b=m[0]/y[0],w=m[1]/y[1];let I=0;for(let T=0;T1?i-1:i,n&&u>1?o-1:o],b=[n&&c>1?c-1:c,n&&u>1?u-1:u],w=y[0]/b[0],I=y[1]/b[1],T=1/w,v=1/I,N=Math.ceil(T)*2+2,E=Math.ceil(v)*2+2;for(let D=0;D=c)continue;const ye=F+le*e.strides[1],me=le*w,Ie=Math.min(i-1,n?Math.round(me):Math.floor(me));if(_!==Ie)continue;for(let Se=0;Se=u)continue;const We=ye+Ee*e.strides[2],Oe=Ee*I,$e=Math.min(o-1,n?Math.round(Oe):Math.floor(Oe));q===$e&&(he+=m[We+ue])}}p[J+ue]=he}}}}return ja(p,t.shape,t.dtype)}localResponseNormalization4D(e,t,n,s,i){Te(e,"localResponseNormalization4D");const o=e.shape[3],a=o-1,c=this.readSync(e.dataId),u=e.size,p=new Float32Array(u);function m(y){const b=y%o;let w=y-b+Math.max(0,b-t);const I=y-b+Math.min(b+t,a);let T=0;for(;w<=I;w++){const v=c[w];T+=v*v}return T}for(let y=0;y=0&&o[a]`Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${n}`),k(t>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=e.shape[1],o=e.shape[2],a=e.shape[3],c=i*t,u=o*t,p=a/(t*t),m=this.readSync(e.dataId),y=new Float32Array(s*c*u*p);let b=0;for(let w=0;wT[D]=0);const v=y.locToIndex(T),N=I.slice(-t.rank);p.forEach(D=>N[D]=0);const E=b.locToIndex(N);m[w]=s(a[v],c[E])}}return o.toTensor()}split(e,t,n){return VY(e,t,n)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(e,t,n,s,i,o){const[a,c,u,p]=e.shape,m=t.shape[0],[y,b]=s,w=Qe([m,y,b,p],"float32"),I=this.readSync(t.dataId),T=this.readSync(n.dataId),v=this.readSync(e.dataId),N=e.strides,E=w.strides;for(let D=0;D=a)continue;const J=y>1?(U-_)*(c-1)/(y-1):0,oe=b>1?(Y-B)*(u-1)/(b-1):0;for(let ce=0;ce1?_*(c-1)+ce*J:.5*(_+U)*(c-1);if(ue<0||ue>c-1){for(let he=0;he1?B*(u-1)+ye*oe:.5*(B+Y)*(u-1);if(me<0||me>u-1){for(let We=0;We1?B*(u-1)+he*oe:.5*(B+Y)*(u-1);if(pe<0||pe>u-1){for(let me=0;me=e.size/a)throw new Error(`Invalid indices: ${b} does not index into ${e.shape}`);for(let I=0;I=s/i)throw new Error(`Invalid indices: ${T} does not index into ${n}`);for(let N=0;N{const{x:t}=e.inputs,n=e.backend;let s=new Float32Array(we(t.shape));if(t.dtype!=="complex64"){const i=n.data.get(t.dataId).values;s=C0(i)}else{const i=n.data.get(t.dataId),o=i.complexTensorInfos.real,a=i.complexTensorInfos.imag,c=n.data.get(o.dataId).values,u=n.data.get(a.dataId).values;for(let p=0;p{const a=nt(t,n),c=a.length,u=Ot(a),p=we(a),m=wn(o,p),y=t.length,b=n.length,w=Ot(t),I=Ot(n),T=Ro(t,a),v=Ro(n,a);if(T.length+v.length===0)for(let N=0;ND[U]=0);const F=ti(D,y,w),_=E.slice(-b);v.forEach(U=>_[U]=0);const B=ti(_,b,I);m[N]=e(s[F],i[B])}return[m,a]}}function pi(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,c=n.makeTensorInfo(s.shape,"complex64"),u=n.data.get(c.dataId);return u.complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",o),imag:n.makeTensorInfo(i.shape,"float32",a)},c}const JY={kernelName:Rg,backendName:"cpu",kernelFunc:pi};function lc(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const ZY={kernelName:Sl,backendName:"cpu",kernelFunc:lc};function iu(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.data.get(s.dataId).complexTensorInfos.real,o=n.data.get(i.dataId).values;return n.makeTensorInfo(i.shape,i.dtype,o)}const QY={kernelName:ey,backendName:"cpu",kernelFunc:iu};function ru(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return lc({inputs:{x:i},backend:n});const a=ct(i.shape),c=ru({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),u=pi({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),u}if(i.dtype==="complex64"){const a=iu({inputs:{input:i},backend:n}),c=ru({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!xy(i.dtype,o)){const a=lc({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32"){const a=n.data.get(i.dataId).values,c=Int32Array.from(a);return n.makeTensorInfo(i.shape,"int32",c)}if(o==="bool"){const a=n.data.get(i.dataId).values,c=Dr([0],i.dtype),[u,p]=Yo((m,y)=>m!==y?1:0)(i.shape,[],a,c,"bool");return n.makeTensorInfo(p,"bool",u)}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const eq={kernelName:ul,backendName:"cpu",kernelFunc:ru};function hc(e,t,n,s){return n==null?({inputs:i,backend:o})=>{const{a,b:c}=i,u=o;Te([a,c],e);const 
p=u.data.get(a.dataId).values,m=u.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,p,m,y);return u.makeTensorInfo(w,y,b)}:({inputs:i,backend:o})=>{const{a,b:c}=i,u=o;if(a.dtype==="complex64"||c.dtype==="complex64"){const p=ru({inputs:{x:a},backend:u,attrs:{dtype:"complex64"}}),m=u.data.get(p.dataId),y=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,w=u.data.get(y.dataId).values,I=u.data.get(b.dataId).values,T=ru({inputs:{x:c},backend:u,attrs:{dtype:"complex64"}}),v=u.data.get(T.dataId),N=v.complexTensorInfos.real,E=v.complexTensorInfos.imag,D=u.data.get(N.dataId).values,F=u.data.get(E.dataId).values,[_,B,U]=n(a.shape,c.shape,w,I,D,F),Y=u.makeTensorInfo(U,"float32",_),q=u.makeTensorInfo(U,"float32",B),J=pi({inputs:{real:Y,imag:q},backend:u});return u.disposeIntermediateTensorInfo(p),u.disposeIntermediateTensorInfo(T),u.disposeIntermediateTensorInfo(Y),u.disposeIntermediateTensorInfo(q),J}else{const p=u.data.get(a.dataId).values,m=u.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,p,m,y);return u.makeTensorInfo(w,y,b)}}}function rS(e){return(t,n,s,i,o,a)=>{const c=nt(t,n),u=we(c),p=c.length,m=Ot(c),y=wn("float32",u),b=wn("float32",u),w=Ro(t,c),I=Ro(n,c),T=ir(s,i),v=ir(o,a),N=t.length,E=Ot(t),D=n.length,F=Ot(n);if(w.length+I.length===0)for(let _=0;_U[ce]=0);const Y=ti(U,N,E),q=B.slice(-D);I.forEach(ce=>q[ce]=0);const J=ti(q,D,F),oe=e(T[Y*2],T[Y*2+1],v[J*2],v[J*2+1]);y[_]=oe.real,b[_]=oe.imag}return[y,b,c]}}const R0=Yo((e,t)=>e+t),tq=rS((e,t,n,s)=>({real:e+n,imag:t+s})),O0=hc(xe,R0,tq),nq={kernelName:xe,backendName:"cpu",kernelFunc:O0};function uc(e){return(t,n,s)=>{const i=wn(n,t.length);for(let o=0;o{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,u=c.data.get(a.dataId).values,p=we(a.shape),m=n||a.dtype,y=wo(m,p);for(let b=0;b{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,u=c.data.get(a.dataId).values,p=n||a.dtype,m=t(u,p,i);return c.makeTensorInfo(a.shape,p,m)}}const E0=uc(e=>Math.ceil(e)),sq=dc(dl,E0),iq={kernelName:dl,backendName:"cpu",kernelFunc:sq};const D0=uc(e=>Math.exp(e)),rq=dc(yl,D0),oq={kernelName:yl,backendName:"cpu",kernelFunc:rq};const k0=uc(e=>Math.expm1(e)),aq=dc(bl,k0),cq={kernelName:bl,backendName:"cpu",kernelFunc:aq};const F0=uc(e=>Math.floor(e)),lq=dc(wl,F0),hq={kernelName:wl,backendName:"cpu",kernelFunc:lq};const _0=uc(e=>Math.log(e)),uq=dc(Al,_0),dq={kernelName:Al,backendName:"cpu",kernelFunc:uq};function W0(e,t,n,s){const i=wn(s,we(n));for(let o=0;oc&&(c=p)}i[o]=c}return i}const $0=Yo((e,t)=>e*t),pq=rS((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n})),U0=hc(Rl,$0,pq),mq={kernelName:Rl,backendName:"cpu",kernelFunc:U0};const B0=uc(e=>1/Math.sqrt(e)),fq=dc(kl,B0),gq={kernelName:kl,backendName:"cpu",kernelFunc:fq};function M0(e,t,n,s,i){const o=Xy(s,t,n),a=we(n),c=Ot(s);if(o){const p=Jy(t,c);return e.subarray(p,p+a)}const u=wn(i,a);for(let p=0;pT+t[v]),I=ti(w,s.length,c);u[p]=e[I]}return u}function oS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{begin:o,size:a}=s;Te(i,"slice");const[c,u]=$d(i,o,a);Ky(i,c,u);const p=n.data.get(i.dataId).values,m=M0(p,c,u,i.shape,i.dtype);return n.makeTensorInfo(u,i.dtype,m)}const yq={kernelName:pd,backendName:"cpu",kernelFunc:oS};const P0=Yo((e,t)=>e-t),bq=rS((e,t,n,s)=>({real:e-n,imag:t-s})),z0=hc(Ml,P0,bq),wq={kernelName:Ml,backendName:"cpu",kernelFunc:z0};function aS(e,t,n,s,i){const 
o=t.length,a=we(t),c=Ot(t),u=Ot(i),p=wn(n,we(i));for(let m=0;m{for(let v=0;vnew jY,1);const Iq=Tt(de,e=>Math.acos(e)),xq={kernelName:de,backendName:"cpu",kernelFunc:Iq};const Tq=Tt(Ae,e=>Math.acosh(e)),Aq={kernelName:Ae,backendName:"cpu",kernelFunc:Tq};const vq=Tt(Fn,e=>Math.asin(e)),Nq={kernelName:Fn,backendName:"cpu",kernelFunc:vq};const Cq=Tt(vn,e=>Math.asinh(e)),Rq={kernelName:vn,backendName:"cpu",kernelFunc:Cq};const Oq=Tt(Nn,e=>Math.atan(e)),Eq={kernelName:Nn,backendName:"cpu",kernelFunc:Oq};const Dq=Tt(Qs,e=>Math.atanh(e)),kq={kernelName:Qs,backendName:"cpu",kernelFunc:Dq};function cS(e,t,n,s,i,o){const a=i.strideHeight,c=i.strideWidth,u=i.dilationHeight,p=i.dilationWidth,m=i.effectiveFilterHeight,y=i.effectiveFilterWidth,b=i.padInfo.top,w=i.padInfo.left,I=o==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,T=Qe(i.outShape,n),v=T.values,N=i.outShape[1]*i.outShape[2]*i.outShape[3],E=i.outShape[2]*i.outShape[3],D=i.outShape[3];for(let F=0;Fye?ye=He:o==="avg"&&(me+=He,Ie++)}if(isNaN(ye))break}const Se=ce+ue*D+U;v[Se]=o==="avg"?me/Ie:ye}}}return T}function V0(e,t,n,s,i=!1,o=!1){const a=Qe(s.outShape,"int32"),c=s.strideHeight,u=s.strideWidth,p=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterHeight,b=s.effectiveFilterWidth,w=s.padInfo.top,I=s.padInfo.left,T=Qe(t,n,e);for(let v=0;vJ&&(J=le,i?oe=o?((v*s.inHeight+ce)*s.inWidth+he)*s.inChannels+N:(ce*s.inWidth+he)*s.inChannels+N:oe=ue*b+pe)}}a.set(oe,v,E,B,N)}}return a}function Fq(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;Te(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:u}=s,p=1;k(rn(a,p),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=lc({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),I=cS(b,i.shape,i.dtype,w,m,"avg");y=n.makeTensorInfo(m.outShape,i.dtype,I.values)}return y}const _q={kernelName:ei,backendName:"cpu",kernelFunc:Fq};function Wq(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;Te([i,o],"avgPoolBackprop");const{filterSize:c,strides:u,pad:p}=s,m=Wn(a.shape,c,u,1,p),y=m.strideHeight,b=m.strideWidth,w=m.filterHeight,I=m.filterWidth,T=m.dilationHeight,v=m.dilationWidth,N=m.effectiveFilterHeight,E=m.effectiveFilterWidth,D=E-1-m.padInfo.left,F=N-1-m.padInfo.top,_=Qe(a.shape,"float32"),B=1/(w*I),U=n.data.get(i.dataId).values,Y=Qe(i.shape,"float32",U);for(let q=0;q=m.outHeight||Math.floor(ye)!==ye)continue;for(let me=0;me=m.outWidth||Math.floor(Ie)!==Ie)continue;const Se=Y.get(q,ye,Ie,J);pe+=Se}}_.set(pe*B,q,oe,ce,J)}return n.makeTensorInfo(_.shape,_.dtype,_.values)}const $q={kernelName:Sa,backendName:"cpu",kernelFunc:Wq};function Uq(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,scale:o,offset:a,mean:c,variance:u}=t;k(c.shape.length===u.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(a==null||c.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(o==null||c.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),Te([i,c,u,o,a],"batchNorm");let{varianceEpsilon:p}=s;p==null&&(p=.001);const m=n.data.get(i.dataId).values,y=n.data.get(c.dataId).values,b=n.data.get(u.dataId).values,w=o?n.data.get(o.dataId).values:new Float32Array([1]),I=a?n.data.get(a.dataId).values:new Float32Array([0]),T=new 
Float32Array(m.length),v=I.length,N=w.length,E=b.length,D=y.length;let F=0,_=0,B=0,U=0;for(let Y=0;Y=v&&(F=0),_>=D&&(_=0),B>=N&&(B=0),U>=E&&(U=0);return n.makeTensorInfo(i.shape,i.dtype,T)}const Bq={kernelName:Ll,backendName:"cpu",kernelFunc:Uq};const Mq=Tt(pl,(e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e`The new shape (${c}) has ${u} elements and the old shape (${i.shape}) has ${a} elements. The new shape and old shape must have the same number of elements.`),n.incRef(i.dataId);const p=n.data.get(i.dataId);if(p.complexTensorInfos!=null){const m=p.complexTensorInfos.real,y=p.complexTensorInfos.imag;m.shape=c,y.shape=c}return{dataId:i.dataId,shape:c,dtype:i.dtype}}const Gq={kernelName:El,backendName:"cpu",kernelFunc:eo};function ou(e){const{inputs:t,backend:n,attrs:s}=e,{axis:i}=s,o=ft(i,t[0].shape)[0];let a=Ur(t.map(w=>w.shape),o);if(we(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(w=>we(w.shape)>0);if(c.length===1)return c[0];const u=c.map(w=>w.shape);if(mb(u,o),c[0].dtype==="complex64"){const w=c.map(E=>iu({inputs:{input:E},backend:n})),I=c.map(E=>dm({inputs:{input:E},backend:n})),T=ou({inputs:w,backend:n,attrs:{axis:i}}),v=ou({inputs:I,backend:n,attrs:{axis:i}}),N=pi({inputs:{real:T,imag:v},backend:n});return w.forEach(E=>n.disposeIntermediateTensorInfo(E)),I.forEach(E=>n.disposeIntermediateTensorInfo(E)),n.disposeIntermediateTensorInfo(T),n.disposeIntermediateTensorInfo(v),N}const p=c.map(w=>{const I=we(w.shape.slice(o)),T=[-1,I];return eo({inputs:{x:w},backend:n,attrs:{shape:T}})});a=Ur(p.map(w=>w.shape),1);const m=wn(c[0].dtype,we(a));if(p[0].shape[0]===1){let w=0;p.forEach(I=>{const T=n.data.get(I.dataId).values,v=we(I.shape);m.set(T,w),w+=v})}else{let w=0;p.forEach(I=>{const T=n.data.get(I.dataId).values;let v=0;for(let N=0;Nw.shape),o),b=n.makeTensorInfo(y,t[0].dtype,m);return p.forEach(w=>n.disposeIntermediateTensorInfo(w)),b}const Vq={kernelName:td,backendName:"cpu",kernelFunc:ou};const Hq=Tt(Ia,e=>Math.cos(e)),Yq={kernelName:Ia,backendName:"cpu",kernelFunc:Hq};const qq=Tt(ml,e=>Math.cosh(e)),jq={kernelName:ml,backendName:"cpu",kernelFunc:qq};const Kq={kernelName:nd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i}=e,{strides:o,pad:a,dilations:c}=n,u=t,p=u.data.get(s.dataId).values,m=s.shape.length,y=u.data.get(i.dataId).values,b=i.shape.length,{batchSize:w,inHeight:I,inWidth:T,inChannels:v,outHeight:N,outWidth:E,padInfo:D,strideHeight:F,strideWidth:_,filterHeight:B,filterWidth:U,dilationHeight:Y,dilationWidth:q,outShape:J}=zd(s.shape,i.shape,o,a,"NHWC",c),oe=we(J),ce=J.length,ue=wo(s.dtype,oe);for(let pe=0;pe=0&&$e=0&&ttEe&&(Ee=un)}}}const We=ti([pe,le,me,Se],ce,Ot(J));ue[We]=Ee}}}const he=u.write(Dr(ue,s.dtype),J,s.dtype);return{dataId:he,shape:J,dtype:s.dtype}}};const Xq={kernelName:id,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:u}=n,p=t,m=Ls(s.shape,p.data.get(s.dataId).values),y=Ls(i.shape,p.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:I,inChannels:T,outHeight:v,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:U,dilationWidth:Y,outShape:q}=zd(s.shape,i.shape,a,c,"NHWC",u);k(o.rank===q.length,()=>`Error in ${id}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=Ls(q,p.data.get(o.dataId).values),oe=vy(i.shape,i.dtype);for(let ue=0;ue=0&&Oe=0&&HeIe&&(Ie=tt,Se=We,Ee=$e)}}}oe[Se][Ee][me]+=J[ue][he][le][me]}}}const 
ce=p.write(Dr(oe,s.dtype),i.shape,i.dtype);return{dataId:ce,shape:i.shape,dtype:i.dtype}}};const Jq={kernelName:sd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:u}=n,p=t,m=Ls(s.shape,p.data.get(s.dataId).values),y=Ls(i.shape,p.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:I,inChannels:T,outHeight:v,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:U,dilationWidth:Y,outShape:q}=zd(s.shape,i.shape,a,c,"NHWC",u);k(o.rank===q.length,()=>`Error in ${sd}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=Ls(q,p.data.get(o.dataId).values),oe=vy(s.shape,s.dtype);for(let ue=0;ue=0&&Oe=0&&HeIe&&(Ie=tt,Se=Oe,Ee=He)}}}oe[ue][Se][Ee][me]+=J[ue][he][le][me]}}}const ce=p.write(Dr(oe,s.dtype),s.shape,s.dtype);return{dataId:ce,shape:s.shape,dtype:s.dtype}}};const Zq=Yo((e,t)=>e/t),Qq=hc(xa,Zq),lS={kernelName:xa,backendName:"cpu",kernelFunc:Qq};const e4=Tt(fl,e=>e>=0?e:Math.exp(e)-1),t4={kernelName:fl,backendName:"cpu",kernelFunc:e4};const n4=Xb,s4=Jb,i4=Zb,r4=Qb,o4=ew,a4=tw,c4=Tt(gl,e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+n4*n);return t*(1-((((a4*s+o4)*s+r4)*s+i4)*s+s4)*s*Math.exp(-n*n))}),l4={kernelName:gl,backendName:"cpu",kernelFunc:c4};function H0(e,t,n){const s=e.shape,i=s[0],o=s[1],a=n.data.get(e.dataId),c=a.complexTensorInfos.real,u=a.complexTensorInfos.imag,p=[i,o],m=we(p),y=wn("float32",m),b=wn("float32",m);for(let v=0;v{const{image:s}=e,i=n,o=wn(s.dtype,we(s.shape)),[a,c,u,p]=s.shape,m=i.data.get(s.dataId).values;for(let b=0;b=0&&_Number.isFinite(e)?1:0,"bool"),w4={kernelName:Il,backendName:"cpu",kernelFunc:b4};const L4=Tt(xl,e=>Math.abs(e)===Infinity?1:0,"bool"),S4={kernelName:xl,backendName:"cpu",kernelFunc:L4};const I4=Tt(Tl,e=>Number.isNaN(e)?1:0,"bool"),x4={kernelName:Tl,backendName:"cpu",kernelFunc:I4};const T4=Tt(vl,e=>Math.log1p(e)),A4={kernelName:vl,backendName:"cpu",kernelFunc:T4};const v4=Tt(od,e=>e?0:1,"bool"),N4={kernelName:od,backendName:"cpu",kernelFunc:v4};const C4={kernelName:Nl,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n;let c=s.shape;const u=c.length,p=ft(i,c);let m=p;const y=_n(m,u);let b=a.data.get(s.dataId).values;if(y!=null){const D=new Array(u);for(let F=0;F`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=lc({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),I=cS(b,i.shape,i.dtype,w,m,"max");y=n.makeTensorInfo(m.outShape,i.dtype,I.values)}return y}const O4={kernelName:Cl,backendName:"cpu",kernelFunc:R4};function E4(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;Te([o,a],"maxPoolBackprop");const{filterSize:u,strides:p,pad:m,dimRoundingMode:y}=s,b=Wn(c.shape,u,p,1,m,y),w=n.data.get(c.dataId).values,I=Qe(b.outShape,c.dtype,V0(w,c.shape,c.dtype,b).values),T=b.strideHeight,v=b.strideWidth,N=b.dilationHeight,E=b.dilationWidth,D=b.effectiveFilterHeight,F=b.effectiveFilterWidth,_=F-1-b.padInfo.left,B=D-1-b.padInfo.top,U=Qe(c.shape,"float32"),Y=n.data.get(i.dataId).values,q=Qe(i.shape,"float32",Y);for(let J=0;J=b.outHeight||Math.floor(me)!==me)continue;for(let Ie=0;Ie=b.outWidth||Math.floor(Se)!==Se)continue;const Ee=D*F-1-I.get(J,me,Se,oe),We=ye*F+Ie,Oe=Ee===We?1:0;if(Oe===0)continue;const $e=q.get(J,me,Se,oe);le+=$e*Oe}}U.set(le,J,ce,ue,oe)}return n.makeTensorInfo(U.shape,U.dtype,U.values)}const D4={kernelName:ad,backendName:"cpu",kernelFunc:E4};function k4(e,t,n,s,i){const o=Ot(t),a=cS(e,t,n,o,i,"max"),c=V0(e,t,n,i,!0,s);return[a.values,c.values]}const F4={kernelName:cd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,u=n;Te(s,"MaxPoolWithArgmax");const p=u.data.get(s.dataId).values,m=Wn(s.shape,i,o,[1,1],a),[y,b]=k4(p,s.shape,s.dtype,c,m),w=u.write(y,m.outShape,s.dtype),I=u.write(b,m.outShape,s.dtype);return[{dataId:w,shape:m.outShape,dtype:s.dtype},{dataId:I,shape:m.outShape,dtype:"int32"}]}};const _4=Lp,W4={kernelName:hd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:u}=n,p=t;Te(s,"NonMaxSuppressionPadded");const m=p.data.get(s.dataId).values,y=p.data.get(i.dataId).values,{selectedIndices:b,validOutputs:w}=_4(m,y,o,a,c,u);return[b,w]}};const $4=Sp,U4={kernelName:ud,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=n,p=t;Te(s,"NonMaxSuppressionWithScore");const m=p.data.get(s.dataId).values,y=p.data.get(i.dataId).values,b=o,w=a,I=c,T=u,{selectedIndices:v,selectedScores:N}=$4(m,y,b,w,I,T);return[v,N]}};const B4=Yo((e,t)=>e!==t?1:0),M4=hc(ld,B4,null,"bool"),P4={kernelName:ld,backendName:"cpu",kernelFunc:M4};function z4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,constantValue:a}=s;Te(i,"pad");const c=o.map((E,D)=>E[0]+i.shape[D]+E[1]),u=o.map(E=>E[0]),p=n.data.get(i.dataId).values,m=we(i.shape),y=i.shape.length,b=Ot(i.shape),w=we(c),I=c.length,T=Ot(c),v=wn(i.dtype,w);a!==0&&v.fill(a);for(let E=0;EB+u[U]),_=ti(F,I,T);v[_]=p[E]}const N=n.write(v,c,i.dtype);return{dataId:N,shape:c,dtype:i.dtype}}const Y0={kernelName:dd,backendName:"cpu",kernelFunc:z4};const G4=Tt(Ol,e=>1/e),V4={kernelName:Ol,backendName:"cpu",kernelFunc:G4};const H4={kernelName:bd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,u=wn(s.dtype,we(s.shape)),[p,m,y,b]=s.shape,[w,I]=qb(a,m,y),T=255,v=Math.sin(i),N=Math.cos(i),E=c.data.get(s.dataId).values;for(let F=0;F=0&&he=0&&pe{const t=Math.floor(e);return 
e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2===0?t:t+1}),q4={kernelName:Dl,backendName:"cpu",kernelFunc:Y4};const j4=xp,K4=Tp,X4=Tt(Fl,e=>e>=0?K4*e:j4*(Math.exp(e)-1)),J4={kernelName:Fl,backendName:"cpu",kernelFunc:X4};const Z4=Tt($l,e=>1/(1+Math.exp(-e))),Q4={kernelName:$l,backendName:"cpu",kernelFunc:Z4};const ej=Tt(Wl,e=>e<0?-1:e>0?1:0),tj={kernelName:Wl,backendName:"cpu",kernelFunc:ej};const nj=Tt(Ta,e=>Math.sin(e)),sj={kernelName:Ta,backendName:"cpu",kernelFunc:nj};const ij=Tt(_l,e=>Math.sinh(e)),rj={kernelName:_l,backendName:"cpu",kernelFunc:ij};const oj=11920928955078125e-23,q0=Math.log(oj)+2,aj=Tt(Ul,e=>{const t=e>-q0,n=eMath.sqrt(e)),pj={kernelName:Bl,backendName:"cpu",kernelFunc:dj};const mj={kernelName:fd,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;Te(n,"square");const i=s.data.get(n.dataId).values,o=new Float32Array(i.length);for(let c=0;c{const n=e-t;return n*n}),gj=hc(Aa,fj),yj={kernelName:Aa,backendName:"cpu",kernelFunc:gj};const bj=Tt(Gl,(e,t)=>{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}),wj={kernelName:Gl,backendName:"cpu",kernelFunc:bj};const Lj=Tt(va,e=>Math.tan(e)),Sj={kernelName:va,backendName:"cpu",kernelFunc:Lj};const Ij=Tt(Pl,e=>Math.tanh(e)),xj={kernelName:Pl,backendName:"cpu",kernelFunc:Ij};function Tj(e){const{inputs:t,attrs:n,backend:s}=e,{axis:i}=n,{x:o}=t;Te(o,"unique");const a=s.data.get(o.dataId).values,{outputValues:c,outputShape:u,indices:p}=G0(a,i,o.shape,o.dtype);return[s.makeTensorInfo(u,o.dtype,c),s.makeTensorInfo([p.length],"int32",p)]}const Aj={kernelName:gd,backendName:"cpu",kernelFunc:Tj};const vj=[XY,xq,Aq,nq,Nq,Rq,Eq,kq,_q,$q,Bq,eq,iq,Pq,JY,Vq,Yq,jq,Kq,Jq,Xq,lS,t4,l4,oq,cq,m4,f4,hq,ZY,y4,zq,w4,S4,x4,dq,A4,N4,O4,D4,F4,C4,mq,W4,U4,P4,Y0,QY,V4,Gq,H4,q4,gq,J4,Q4,tj,sj,rj,yq,cj,uj,pj,mj,yj,wj,wq,Sj,xj,lj,Aj];for(const e of vj)Ld(e);const qo={},uS={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function Nj(e,t){qo[e]=t}function Mi(e){if(!(e in qo)){const n=Rj(e);if(n!==null)qo[e]=n;else return console.log("Could not get context for WebGL version",e),null}const t=qo[e];return t.isContextLost()?(delete qo[e],Mi(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),qo[e])}function Cj(e){if(typeof OffscreenCanvas!="undefined"&&e===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function Rj(e){if(e!==1&&e!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=Cj(e);return t.addEventListener("webglcontextlost",n=>{n.preventDefault(),delete qo[e]},!1),e===1?t.getContext("webgl",uS)||t.getContext("experimental-webgl",uS):t.getContext("webgl2",uS)}var au;(function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"})(au||(au={}));var Cs;(function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"})(Cs||(Cs={}));var In;(function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(In||(In={}));function cu(e,t){return[t,e]}function Oj(e,t){return e*t}function nte(e,t){return[t*4,e]}function lu(e){const 
t=we(e),n=Math.ceil(t/4);return Sd(n)}function ste(e,t){if(e%t!==0)throw new Error(`unpackedSize (${e}) must be a multiple of ${t}`);return e/t}function ite(e,t,n){const s=e.length*n/4;if(t.length= ${s}`);let i=0;for(let o=0;oe.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function $j(e,t){const n=ur(e,()=>e.createShader(e.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}function Uj(e,t){const n=ur(e,()=>e.createShader(e.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw Mj(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}const Bj=/ERROR: [0-9]+:([0-9]+):/g;function Mj(e,t){const n=Bj.exec(t);if(n==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(e);return}const s=+n[1],i=e.split(` -`),o=i.length.toString().length+2,a=i.map((y,b)=>bo((b+1).toString(),o)+y);let c=0;for(let y=0;yYl(s))}catch(s){throw new Error("Failed to decode encoded string bytes into utf-8")}return Qe(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return $s().makeTensorFromDataId(s,t,n,this)}disposeData(e){if(this.data.has(e)){const{complexTensorInfos:t}=this.data.get(e);t!=null&&(this.disposeData(t.real.dataId),this.disposeData(t.imag.dataId)),this.data.delete(e)}}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.data.has(t)){const n=this.data.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}async time(e){const t=qn();e();const n=qn()-t;return{kernelMs:n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(e,t,n,s){Te(e,"stridedSlice");const i=Wd(t,n,s);if(i.some(c=>c===0))return en([],i);const o=Qe(i,e.dtype),a=this.bufferSync(e);for(let c=0;ca[c]=e.shape[c]-1-a[c]),n.set(s.get(...a),...o)}return n.toTensor()}neg(e){return Te(e,"neg"),X(Ne(-1),e)}addN(e){Te(e,"addN");const t=e.map(i=>this.readSync(i.dataId)),n=Qe(e[0].shape,e[0].dtype),s=n.values;for(let i=0;iMath.pow(n,s))}batchMatMul(e,t,n,s){Te([e,t],"matMul");const i=n?e.shape[1]:e.shape[2],o=n?e.shape[2]:e.shape[1],a=s?t.shape[1]:t.shape[2],c=e.shape[0],u=this.readSync(e.dataId),p=this.readSync(t.dataId),[m,y,b]=n?[e.strides[0],1,e.strides[1]]:[e.strides[0],e.strides[1],1],[w,I,T]=s?[1,t.strides[1],t.strides[0]]:[t.strides[1],1,t.strides[0]],v=o*a,N=Qe([c,o,a],e.dtype),E=N.values,D=this.blockSize;for(let F=0;FMath.floor(i/o),s="int32";return this.broadcastedBinaryOp(e,t,s,n)}sum(e,t){Te(e,"sum"),ss("sum",t,e.rank);const[n,s]=On(e.shape,t),i=Cn(e.dtype,"int32"),o=ct(n,i),a=we(s),c=this.readSync(o.dataId),u=this.readSync(e.dataId);for(let p=0;py&&(y=I,b=w)}c[p]=b}return o}cumsum(e,t,n,s){if(Te(e,"cumsum"),t!==e.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=Cn(e.dtype,"int32"),o=ct(e.shape,i),a=this.readSync(o.dataId),c=this.readSync(e.dataId),u=e.shape[e.rank-1],p=s?(m,y)=>m+u-y-1:(m,y)=>m+y;for(let m=0;mn===s?1:0)}notEqual(e,t){return Te([e,t],"notEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n!==s?1:0)}less(e,t){return Te([e,t],"less"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>nn<=s?1:0)}greater(e,t){return Te([e,t],"greater"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>s?1:0)}greaterEqual(e,t){return Te([e,t],"greaterEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>=s?1:0)}logicalAnd(e,t){return Te([e,t],"logicalAnd"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n&&s)}logicalOr(e,t){return Te([e,t],"logicalOr"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n||s)}select(e,t,n){Te([e,t,n],"select");const s=this.readSync(e.dataId),i=this.readSync(t.dataId),o=this.readSync(n.dataId),a=ct(t.shape,Cn(t.dtype,n.dtype)),c=this.readSync(a.dataId);let u=0;const p=e.rank===0||e.rank>1||t.rank===1?1:we(t.shape.slice(1));for(let m=0;mMath.min(n,s))}mod(e,t){return Te([e,t],"mod"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>{const i=n%s;return n<0&&s<0||n>=0&&s>=0?i:(i+s)%s})}maximum(e,t){return Te([e,t],"maximum"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.max(n,s))}all(e,t){Te(e,"all"),ss("all",t,e.rank);const[n,s]=On(e.shape,t),i=ct(n,e.dtype),o=we(s),a=this.readSync(i.dataId),c=this.readSync(e.dataId);for(let u=0;u{const i=n-s;return i*i})}linear(e){return e}relu(e){Te(e,"relu");const t=ct(e.shape,e.dtype),n=this.readSync(t.dataId),s=this.readSync(e.dataId);for(let i=0;in<0?s*n:n)}eluDer(e,t){Te([e,t],"eluDer");const n=new Float32Array(t.size),s=this.readSync(t.dataId),i=this.readSync(e.dataId);for(let o=0;o=1?n[o]=i[o]:n[o]=i[o]*(a+1)}return this.makeOutput(n,t.shape,"float32")}atan2(e,t){return Te([e,t],"atan2"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.atan2(n,s))}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){let a=this.conv2d(e,t,n);return s&&(a=be(a,s)),i&&(a=iS(this,a,i,o)),a}conv2d(e,t,n){Te([e,t],"conv2d");const 
s=n.filterHeight,i=n.filterWidth,o=n.dilationHeight,a=n.dilationWidth,c=n.padInfo.left,u=n.padInfo.top,p=n.dataFormat==="channelsLast",m=Qe(n.outShape,e.dtype),y=e.strides[0],b=p?e.strides[1]:e.strides[2],w=p?e.strides[2]:1,I=p?1:e.strides[1],T=m.strides[0],v=p?m.strides[1]:m.strides[2],N=p?m.strides[2]:1,E=p?1:m.strides[1],D=this.readSync(e.dataId),F=this.readSync(t.dataId),_=m.values;for(let B=0;B=n.inHeight)continue;const he=ce*t.strides[0],pe=U+ue*b;for(let le=0;le=n.inWidth)continue;const Ee=he+Ie*t.strides[1],We=pe+Se*w;let Oe=Ee;for(let $e=0;$e=n.inDepth)continue;const Y=B*t.strides[0],q=N+U*e.strides[1];for(let J=0;J=n.inHeight)continue;const pe=Y+ue*t.strides[1],le=q+he*e.strides[2];for(let ye=0;ye=n.inWidth)continue;const We=pe+Se*t.strides[2],Oe=le+Ee*n.inChannels;let $e=We;for(let He=0;He=n.inHeight)continue;const B=F*t.strides[0],U=T+_*e.strides[1];for(let Y=0;Y=n.inWidth)continue;const ue=B+oe*t.strides[1],he=U+ce*n.inChannels;let pe=q,le=ue;for(let ye=0;yep*m),i=Rh(e.shape,t,s),o=Oh(i.length,t.length),a=Eh(e.shape,t,s),c=jb(n,t.length),u=Kb(a,n,t.length);return Pe(e.reshape(i),o).reshape(a).slice(c,u)}pool3d(e,t,n){Te(e,"pool3d");const s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,u=t.dilationWidth,p=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,I=t.padInfo.left,T=n==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,v=this.readSync(e.dataId),N=Qe(t.outShape,e.dtype),E=N.values,D=t.outShape[1]*t.outShape[2]*t.outShape[3]*t.outShape[4],F=t.outShape[2]*t.outShape[3]*t.outShape[4],_=t.outShape[3]*t.outShape[4],B=t.outShape[4];for(let U=0;Utt?tt=Hs:n==="avg"&&(bt+=Hs,Jt++),isNaN(tt))break}if(isNaN(tt))break}if(isNaN(tt))break}const dn=He+J;E[dn]=n==="avg"?bt/Jt:tt}}}}return N.toTensor()}avgPool3d(e,t){return Te(e,"avgPool3d"),this.pool3d(e,t,"avg").toFloat()}avgPool3dBackprop(e,t,n){Te([e,t],"avgPool3dBackprop");const s=n.strideDepth,i=n.strideHeight,o=n.strideWidth,a=n.filterDepth,c=n.filterHeight,u=n.filterWidth,p=n.dilationDepth,m=n.dilationHeight,y=n.dilationWidth,b=n.effectiveFilterDepth,w=n.effectiveFilterHeight,I=n.effectiveFilterWidth,T=b-1-n.padInfo.front,v=I-1-n.padInfo.left,N=w-1-n.padInfo.top,E=Qe(t.shape,"float32"),D=1/(a*c*u),F=this.bufferSync(e);for(let _=0;_=n.outDepth||Math.floor(pe)!==pe)continue;for(let le=0;le=n.outHeight||Math.floor(ye)!==ye)continue;for(let me=0;me=n.outWidth||Math.floor(Ie)!==Ie)continue;const Se=F.get(_,pe,ye,Ie,B);ue+=Se}}}E.set(ue*D,_,U,Y,q,B)}return E.toTensor()}maxPool3d(e,t){return Te(e,"maxPool3d"),this.pool3d(e,t,"max").toFloat()}maxPool3dPositions(e,t){const n=Qe(t.outShape,"int32"),s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,u=t.dilationWidth,p=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,I=t.padInfo.left,T=this.bufferSync(e);for(let v=0;v=he&&(he=We,pe=ye*m*y+Ie*m+Ee)}}}n.set(pe,v,E,B,J,N)}}}return n.toTensor()}maxPool3dBackprop(e,t,n,s){Te([t,n],"maxPool3dBackprop");const i=this.maxPool3dPositions(t,s),o=s.strideDepth,a=s.strideHeight,c=s.strideWidth,u=s.dilationDepth,p=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterDepth,b=s.effectiveFilterHeight,w=s.effectiveFilterWidth,I=y-1-s.padInfo.front,T=w-1-s.padInfo.left,v=b-1-s.padInfo.top,N=Qe(t.shape,"float32"),E=this.bufferSync(i),D=this.bufferSync(e);for(let F=0;F=s.outDepth||Math.floor(he)!==he)continue;for(let 
pe=0;pe=s.outHeight||Math.floor(le)!==le)continue;for(let ye=0;ye=s.outWidth||Math.floor(me)!==me)continue;const Ie=y*b*w-1-E.get(F,he,le,me,_),Se=ue*b*w+pe*w+ye,Ee=Ie===Se?1:0;if(Ee===0)continue;const We=D.get(F,he,le,me,_);ce+=We*Ee}}}N.set(ce,F,B,U,Y,_)}return N.toTensor()}resizeBilinear(e,t,n,s){Te(e,"resizeBilinear");const[i,o,a,c]=e.shape,u=this.readSync(e.dataId),p=new Float32Array(we([i,t,n,c])),m=[s&&t>1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n];let b=0;const w=m[0]/y[0],I=m[1]/y[1];for(let T=0;T1?i-1:i,n&&u>1?o-1:o],y=[n&&c>1?c-1:c,n&&u>1?u-1:u],b=m[0]/y[0],w=m[1]/y[1],I=this.readSync(e.dataId);let T=0;for(let v=0;v1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n],b=m[0]/y[0],w=m[1]/y[1];let I=0;for(let T=0;T1?i-1:i,n&&u>1?o-1:o],b=[n&&c>1?c-1:c,n&&u>1?u-1:u],w=y[0]/b[0],I=y[1]/b[1],T=1/w,v=1/I,N=Math.ceil(T)*2+2,E=Math.ceil(v)*2+2;for(let D=0;D=c)continue;const ye=F+le*e.strides[1],me=le*w,Ie=Math.min(i-1,n?Math.round(me):Math.floor(me));if(_!==Ie)continue;for(let Se=0;Se=u)continue;const We=ye+Ee*e.strides[2],Oe=Ee*I,$e=Math.min(o-1,n?Math.round(Oe):Math.floor(Oe));q===$e&&(he+=m[We+ue])}}p[J+ue]=he}}}}return Xa(p,t.shape,t.dtype)}localResponseNormalization4D(e,t,n,s,i){Te(e,"localResponseNormalization4D");const o=e.shape[3],a=o-1,c=this.readSync(e.dataId),u=e.size,p=new Float32Array(u);function m(y){const b=y%o;let w=y-b+Math.max(0,b-t);const I=y-b+Math.min(b+t,a);let T=0;for(;w<=I;w++){const v=c[w];T+=v*v}return T}for(let y=0;y=0&&o[a]`Only NHWC dataFormat supported on CPU for depthToSpace. Got ${n}`),k(t>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=e.shape[1],o=e.shape[2],a=e.shape[3],c=i*t,u=o*t,p=a/(t*t),m=this.readSync(e.dataId),y=new Float32Array(s*c*u*p);let b=0;for(let w=0;wT[D]=0);const v=y.locToIndex(T),N=I.slice(-t.rank);p.forEach(D=>N[D]=0);const E=b.locToIndex(N);m[w]=s(a[v],c[E])}}return o.toTensor()}split(e,t,n){return VY(e,t,n)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(e,t,n,s,i,o){const[a,c,u,p]=e.shape,m=t.shape[0],[y,b]=s,w=Qe([m,y,b,p],"float32"),I=this.readSync(t.dataId),T=this.readSync(n.dataId),v=this.readSync(e.dataId),N=e.strides,E=w.strides;for(let D=0;D=a)continue;const J=y>1?(U-_)*(c-1)/(y-1):0,oe=b>1?(Y-B)*(u-1)/(b-1):0;for(let ce=0;ce1?_*(c-1)+ce*J:.5*(_+U)*(c-1);if(ue<0||ue>c-1){for(let he=0;he1?B*(u-1)+ye*oe:.5*(B+Y)*(u-1);if(me<0||me>u-1){for(let We=0;We1?B*(u-1)+he*oe:.5*(B+Y)*(u-1);if(pe<0||pe>u-1){for(let me=0;me=e.size/a)throw new Error(`Invalid indices: ${b} does not index into ${e.shape}`);for(let I=0;I=s/i)throw new Error(`Invalid indices: ${T} does not index into ${n}`);for(let N=0;N{const{x:t}=e.inputs,n=e.backend;let s=new Float32Array(we(t.shape));if(t.dtype!=="complex64"){const i=n.data.get(t.dataId).values;s=C0(i)}else{const i=n.data.get(t.dataId),o=i.complexTensorInfos.real,a=i.complexTensorInfos.imag,c=n.data.get(o.dataId).values,u=n.data.get(a.dataId).values;for(let p=0;p{const a=nt(t,n),c=a.length,u=Ot(a),p=we(a),m=wn(o,p),y=t.length,b=n.length,w=Ot(t),I=Ot(n),T=Eo(t,a),v=Eo(n,a);if(T.length+v.length===0)for(let N=0;ND[U]=0);const F=ti(D,y,w),_=E.slice(-b);v.forEach(U=>_[U]=0);const B=ti(_,b,I);m[N]=e(s[F],i[B])}return[m,a]}}function pi(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,c=n.makeTensorInfo(s.shape,"complex64"),u=n.data.get(c.dataId);return u.complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",o),imag:n.makeTensorInfo(i.shape,"float32",a)},c}const 
JY={kernelName:Rg,backendName:"cpu",kernelFunc:pi};function uc(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const ZY={kernelName:Sl,backendName:"cpu",kernelFunc:uc};function iu(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.data.get(s.dataId).complexTensorInfos.real,o=n.data.get(i.dataId).values;return n.makeTensorInfo(i.shape,i.dtype,o)}const QY={kernelName:ey,backendName:"cpu",kernelFunc:iu};function ru(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return uc({inputs:{x:i},backend:n});const a=ct(i.shape),c=ru({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),u=pi({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),u}if(i.dtype==="complex64"){const a=iu({inputs:{input:i},backend:n}),c=ru({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!xy(i.dtype,o)){const a=uc({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32"){const a=n.data.get(i.dataId).values,c=Int32Array.from(a);return n.makeTensorInfo(i.shape,"int32",c)}if(o==="bool"){const a=n.data.get(i.dataId).values,c=Dr([0],i.dtype),[u,p]=jo((m,y)=>m!==y?1:0)(i.shape,[],a,c,"bool");return n.makeTensorInfo(p,"bool",u)}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const eq={kernelName:ul,backendName:"cpu",kernelFunc:ru};function dc(e,t,n,s){return n==null?({inputs:i,backend:o})=>{const{a,b:c}=i,u=o;Te([a,c],e);const p=u.data.get(a.dataId).values,m=u.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,p,m,y);return u.makeTensorInfo(w,y,b)}:({inputs:i,backend:o})=>{const{a,b:c}=i,u=o;if(a.dtype==="complex64"||c.dtype==="complex64"){const p=ru({inputs:{x:a},backend:u,attrs:{dtype:"complex64"}}),m=u.data.get(p.dataId),y=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,w=u.data.get(y.dataId).values,I=u.data.get(b.dataId).values,T=ru({inputs:{x:c},backend:u,attrs:{dtype:"complex64"}}),v=u.data.get(T.dataId),N=v.complexTensorInfos.real,E=v.complexTensorInfos.imag,D=u.data.get(N.dataId).values,F=u.data.get(E.dataId).values,[_,B,U]=n(a.shape,c.shape,w,I,D,F),Y=u.makeTensorInfo(U,"float32",_),q=u.makeTensorInfo(U,"float32",B),J=pi({inputs:{real:Y,imag:q},backend:u});return u.disposeIntermediateTensorInfo(p),u.disposeIntermediateTensorInfo(T),u.disposeIntermediateTensorInfo(Y),u.disposeIntermediateTensorInfo(q),J}else{const p=u.data.get(a.dataId).values,m=u.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,p,m,y);return u.makeTensorInfo(w,y,b)}}}function rS(e){return(t,n,s,i,o,a)=>{const c=nt(t,n),u=we(c),p=c.length,m=Ot(c),y=wn("float32",u),b=wn("float32",u),w=Eo(t,c),I=Eo(n,c),T=ir(s,i),v=ir(o,a),N=t.length,E=Ot(t),D=n.length,F=Ot(n);if(w.length+I.length===0)for(let _=0;_U[ce]=0);const Y=ti(U,N,E),q=B.slice(-D);I.forEach(ce=>q[ce]=0);const J=ti(q,D,F),oe=e(T[Y*2],T[Y*2+1],v[J*2],v[J*2+1]);y[_]=oe.real,b[_]=oe.imag}return[y,b,c]}}const R0=jo((e,t)=>e+t),tq=rS((e,t,n,s)=>({real:e+n,imag:t+s})),O0=dc(xe,R0,tq),nq={kernelName:xe,backendName:"cpu",kernelFunc:O0};function pc(e){return(t,n,s)=>{const i=wn(n,t.length);for(let o=0;o{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,u=c.data.get(a.dataId).values,p=we(a.shape),m=n||a.dtype,y=So(m,p);for(let b=0;b{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string 
input/output");const c=o,u=c.data.get(a.dataId).values,p=n||a.dtype,m=t(u,p,i);return c.makeTensorInfo(a.shape,p,m)}}const E0=pc(e=>Math.ceil(e)),sq=mc(dl,E0),iq={kernelName:dl,backendName:"cpu",kernelFunc:sq};const D0=pc(e=>Math.exp(e)),rq=mc(yl,D0),oq={kernelName:yl,backendName:"cpu",kernelFunc:rq};const k0=pc(e=>Math.expm1(e)),aq=mc(bl,k0),cq={kernelName:bl,backendName:"cpu",kernelFunc:aq};const F0=pc(e=>Math.floor(e)),lq=mc(wl,F0),hq={kernelName:wl,backendName:"cpu",kernelFunc:lq};const _0=pc(e=>Math.log(e)),uq=mc(Al,_0),dq={kernelName:Al,backendName:"cpu",kernelFunc:uq};function W0(e,t,n,s){const i=wn(s,we(n));for(let o=0;oc&&(c=p)}i[o]=c}return i}const $0=jo((e,t)=>e*t),pq=rS((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n})),U0=dc(Rl,$0,pq),mq={kernelName:Rl,backendName:"cpu",kernelFunc:U0};const B0=pc(e=>1/Math.sqrt(e)),fq=mc(kl,B0),gq={kernelName:kl,backendName:"cpu",kernelFunc:fq};function M0(e,t,n,s,i){const o=Xy(s,t,n),a=we(n),c=Ot(s);if(o){const p=Jy(t,c);return e.subarray(p,p+a)}const u=wn(i,a);for(let p=0;pT+t[v]),I=ti(w,s.length,c);u[p]=e[I]}return u}function oS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{begin:o,size:a}=s;Te(i,"slice");const[c,u]=$d(i,o,a);Ky(i,c,u);const p=n.data.get(i.dataId).values,m=M0(p,c,u,i.shape,i.dtype);return n.makeTensorInfo(u,i.dtype,m)}const yq={kernelName:pd,backendName:"cpu",kernelFunc:oS};const P0=jo((e,t)=>e-t),bq=rS((e,t,n,s)=>({real:e-n,imag:t-s})),z0=dc(Ml,P0,bq),wq={kernelName:Ml,backendName:"cpu",kernelFunc:z0};function aS(e,t,n,s,i){const o=t.length,a=we(t),c=Ot(t),u=Ot(i),p=wn(n,we(i));for(let m=0;m{for(let v=0;vnew jY,1);const Iq=Tt(de,e=>Math.acos(e)),xq={kernelName:de,backendName:"cpu",kernelFunc:Iq};const Tq=Tt(Ae,e=>Math.acosh(e)),Aq={kernelName:Ae,backendName:"cpu",kernelFunc:Tq};const vq=Tt(Fn,e=>Math.asin(e)),Nq={kernelName:Fn,backendName:"cpu",kernelFunc:vq};const Cq=Tt(vn,e=>Math.asinh(e)),Rq={kernelName:vn,backendName:"cpu",kernelFunc:Cq};const Oq=Tt(Nn,e=>Math.atan(e)),Eq={kernelName:Nn,backendName:"cpu",kernelFunc:Oq};const Dq=Tt(Qs,e=>Math.atanh(e)),kq={kernelName:Qs,backendName:"cpu",kernelFunc:Dq};function cS(e,t,n,s,i,o){const a=i.strideHeight,c=i.strideWidth,u=i.dilationHeight,p=i.dilationWidth,m=i.effectiveFilterHeight,y=i.effectiveFilterWidth,b=i.padInfo.top,w=i.padInfo.left,I=o==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,T=Qe(i.outShape,n),v=T.values,N=i.outShape[1]*i.outShape[2]*i.outShape[3],E=i.outShape[2]*i.outShape[3],D=i.outShape[3];for(let F=0;Fye?ye=He:o==="avg"&&(me+=He,Ie++)}if(isNaN(ye))break}const Se=ce+ue*D+U;v[Se]=o==="avg"?me/Ie:ye}}}return T}function V0(e,t,n,s,i=!1,o=!1){const a=Qe(s.outShape,"int32"),c=s.strideHeight,u=s.strideWidth,p=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterHeight,b=s.effectiveFilterWidth,w=s.padInfo.top,I=s.padInfo.left,T=Qe(t,n,e);for(let v=0;vJ&&(J=le,i?oe=o?((v*s.inHeight+ce)*s.inWidth+he)*s.inChannels+N:(ce*s.inWidth+he)*s.inChannels+N:oe=ue*b+pe)}}a.set(oe,v,E,B,N)}}return a}function Fq(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;Te(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:u}=s,p=1;k(on(a,p),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=uc({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),I=cS(b,i.shape,i.dtype,w,m,"avg");y=n.makeTensorInfo(m.outShape,i.dtype,I.values)}return y}const _q={kernelName:ei,backendName:"cpu",kernelFunc:Fq};function Wq(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;Te([i,o],"avgPoolBackprop");const{filterSize:c,strides:u,pad:p}=s,m=Wn(a.shape,c,u,1,p),y=m.strideHeight,b=m.strideWidth,w=m.filterHeight,I=m.filterWidth,T=m.dilationHeight,v=m.dilationWidth,N=m.effectiveFilterHeight,E=m.effectiveFilterWidth,D=E-1-m.padInfo.left,F=N-1-m.padInfo.top,_=Qe(a.shape,"float32"),B=1/(w*I),U=n.data.get(i.dataId).values,Y=Qe(i.shape,"float32",U);for(let q=0;q=m.outHeight||Math.floor(ye)!==ye)continue;for(let me=0;me=m.outWidth||Math.floor(Ie)!==Ie)continue;const Se=Y.get(q,ye,Ie,J);pe+=Se}}_.set(pe*B,q,oe,ce,J)}return n.makeTensorInfo(_.shape,_.dtype,_.values)}const $q={kernelName:xa,backendName:"cpu",kernelFunc:Wq};function Uq(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,scale:o,offset:a,mean:c,variance:u}=t;k(c.shape.length===u.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(a==null||c.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(o==null||c.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),Te([i,c,u,o,a],"batchNorm");let{varianceEpsilon:p}=s;p==null&&(p=.001);const m=n.data.get(i.dataId).values,y=n.data.get(c.dataId).values,b=n.data.get(u.dataId).values,w=o?n.data.get(o.dataId).values:new Float32Array([1]),I=a?n.data.get(a.dataId).values:new Float32Array([0]),T=new Float32Array(m.length),v=I.length,N=w.length,E=b.length,D=y.length;let F=0,_=0,B=0,U=0;for(let Y=0;Y=v&&(F=0),_>=D&&(_=0),B>=N&&(B=0),U>=E&&(U=0);return n.makeTensorInfo(i.shape,i.dtype,T)}const Bq={kernelName:Ll,backendName:"cpu",kernelFunc:Uq};const Mq=Tt(pl,(e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e`The new shape (${c}) has ${u} elements and the old shape (${i.shape}) has ${a} elements. 
The new shape and old shape must have the same number of elements.`),n.incRef(i.dataId);const p=n.data.get(i.dataId);if(p.complexTensorInfos!=null){const m=p.complexTensorInfos.real,y=p.complexTensorInfos.imag;m.shape=c,y.shape=c}return{dataId:i.dataId,shape:c,dtype:i.dtype}}const Gq={kernelName:El,backendName:"cpu",kernelFunc:eo};function ou(e){const{inputs:t,backend:n,attrs:s}=e,{axis:i}=s,o=ft(i,t[0].shape)[0];let a=Ur(t.map(w=>w.shape),o);if(we(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(w=>we(w.shape)>0);if(c.length===1)return c[0];const u=c.map(w=>w.shape);if(mb(u,o),c[0].dtype==="complex64"){const w=c.map(E=>iu({inputs:{input:E},backend:n})),I=c.map(E=>dm({inputs:{input:E},backend:n})),T=ou({inputs:w,backend:n,attrs:{axis:i}}),v=ou({inputs:I,backend:n,attrs:{axis:i}}),N=pi({inputs:{real:T,imag:v},backend:n});return w.forEach(E=>n.disposeIntermediateTensorInfo(E)),I.forEach(E=>n.disposeIntermediateTensorInfo(E)),n.disposeIntermediateTensorInfo(T),n.disposeIntermediateTensorInfo(v),N}const p=c.map(w=>{const I=we(w.shape.slice(o)),T=[-1,I];return eo({inputs:{x:w},backend:n,attrs:{shape:T}})});a=Ur(p.map(w=>w.shape),1);const m=wn(c[0].dtype,we(a));if(p[0].shape[0]===1){let w=0;p.forEach(I=>{const T=n.data.get(I.dataId).values,v=we(I.shape);m.set(T,w),w+=v})}else{let w=0;p.forEach(I=>{const T=n.data.get(I.dataId).values;let v=0;for(let N=0;Nw.shape),o),b=n.makeTensorInfo(y,t[0].dtype,m);return p.forEach(w=>n.disposeIntermediateTensorInfo(w)),b}const Vq={kernelName:td,backendName:"cpu",kernelFunc:ou};const Hq=Tt(Ta,e=>Math.cos(e)),Yq={kernelName:Ta,backendName:"cpu",kernelFunc:Hq};const qq=Tt(ml,e=>Math.cosh(e)),jq={kernelName:ml,backendName:"cpu",kernelFunc:qq};const Kq={kernelName:nd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i}=e,{strides:o,pad:a,dilations:c}=n,u=t,p=u.data.get(s.dataId).values,m=s.shape.length,y=u.data.get(i.dataId).values,b=i.shape.length,{batchSize:w,inHeight:I,inWidth:T,inChannels:v,outHeight:N,outWidth:E,padInfo:D,strideHeight:F,strideWidth:_,filterHeight:B,filterWidth:U,dilationHeight:Y,dilationWidth:q,outShape:J}=zd(s.shape,i.shape,o,a,"NHWC",c),oe=we(J),ce=J.length,ue=So(s.dtype,oe);for(let pe=0;pe=0&&$e=0&&ttEe&&(Ee=dn)}}}const We=ti([pe,le,me,Se],ce,Ot(J));ue[We]=Ee}}}const he=u.write(Dr(ue,s.dtype),J,s.dtype);return{dataId:he,shape:J,dtype:s.dtype}}};const Xq={kernelName:id,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:u}=n,p=t,m=Ls(s.shape,p.data.get(s.dataId).values),y=Ls(i.shape,p.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:I,inChannels:T,outHeight:v,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:U,dilationWidth:Y,outShape:q}=zd(s.shape,i.shape,a,c,"NHWC",u);k(o.rank===q.length,()=>`Error in ${id}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=Ls(q,p.data.get(o.dataId).values),oe=vy(i.shape,i.dtype);for(let ue=0;ue=0&&Oe=0&&HeIe&&(Ie=tt,Se=We,Ee=$e)}}}oe[Se][Ee][me]+=J[ue][he][le][me]}}}const ce=p.write(Dr(oe,s.dtype),i.shape,i.dtype);return{dataId:ce,shape:i.shape,dtype:i.dtype}}};const 
Jq={kernelName:sd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:u}=n,p=t,m=Ls(s.shape,p.data.get(s.dataId).values),y=Ls(i.shape,p.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:I,inChannels:T,outHeight:v,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:U,dilationWidth:Y,outShape:q}=zd(s.shape,i.shape,a,c,"NHWC",u);k(o.rank===q.length,()=>`Error in ${sd}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=Ls(q,p.data.get(o.dataId).values),oe=vy(s.shape,s.dtype);for(let ue=0;ue=0&&Oe=0&&HeIe&&(Ie=tt,Se=Oe,Ee=He)}}}oe[ue][Se][Ee][me]+=J[ue][he][le][me]}}}const ce=p.write(Dr(oe,s.dtype),s.shape,s.dtype);return{dataId:ce,shape:s.shape,dtype:s.dtype}}};const Zq=jo((e,t)=>e/t),Qq=dc(Aa,Zq),lS={kernelName:Aa,backendName:"cpu",kernelFunc:Qq};const e4=Tt(fl,e=>e>=0?e:Math.exp(e)-1),t4={kernelName:fl,backendName:"cpu",kernelFunc:e4};const n4=Xb,s4=Jb,i4=Zb,r4=Qb,o4=ew,a4=tw,c4=Tt(gl,e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+n4*n);return t*(1-((((a4*s+o4)*s+r4)*s+i4)*s+s4)*s*Math.exp(-n*n))}),l4={kernelName:gl,backendName:"cpu",kernelFunc:c4};function H0(e,t,n){const s=e.shape,i=s[0],o=s[1],a=n.data.get(e.dataId),c=a.complexTensorInfos.real,u=a.complexTensorInfos.imag,p=[i,o],m=we(p),y=wn("float32",m),b=wn("float32",m);for(let v=0;v{const{image:s}=e,i=n,o=wn(s.dtype,we(s.shape)),[a,c,u,p]=s.shape,m=i.data.get(s.dataId).values;for(let b=0;b=0&&_Number.isFinite(e)?1:0,"bool"),w4={kernelName:Il,backendName:"cpu",kernelFunc:b4};const L4=Tt(xl,e=>Math.abs(e)===Infinity?1:0,"bool"),S4={kernelName:xl,backendName:"cpu",kernelFunc:L4};const I4=Tt(Tl,e=>Number.isNaN(e)?1:0,"bool"),x4={kernelName:Tl,backendName:"cpu",kernelFunc:I4};const T4=Tt(vl,e=>Math.log1p(e)),A4={kernelName:vl,backendName:"cpu",kernelFunc:T4};const v4=Tt(od,e=>e?0:1,"bool"),N4={kernelName:od,backendName:"cpu",kernelFunc:v4};const C4={kernelName:Nl,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n;let c=s.shape;const u=c.length,p=ft(i,c);let m=p;const y=_n(m,u);let b=a.data.get(s.dataId).values;if(y!=null){const D=new Array(u);for(let F=0;F`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=uc({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),I=cS(b,i.shape,i.dtype,w,m,"max");y=n.makeTensorInfo(m.outShape,i.dtype,I.values)}return y}const O4={kernelName:Cl,backendName:"cpu",kernelFunc:R4};function E4(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;Te([o,a],"maxPoolBackprop");const{filterSize:u,strides:p,pad:m,dimRoundingMode:y}=s,b=Wn(c.shape,u,p,1,m,y),w=n.data.get(c.dataId).values,I=Qe(b.outShape,c.dtype,V0(w,c.shape,c.dtype,b).values),T=b.strideHeight,v=b.strideWidth,N=b.dilationHeight,E=b.dilationWidth,D=b.effectiveFilterHeight,F=b.effectiveFilterWidth,_=F-1-b.padInfo.left,B=D-1-b.padInfo.top,U=Qe(c.shape,"float32"),Y=n.data.get(i.dataId).values,q=Qe(i.shape,"float32",Y);for(let J=0;J=b.outHeight||Math.floor(me)!==me)continue;for(let Ie=0;Ie=b.outWidth||Math.floor(Se)!==Se)continue;const Ee=D*F-1-I.get(J,me,Se,oe),We=ye*F+Ie,Oe=Ee===We?1:0;if(Oe===0)continue;const $e=q.get(J,me,Se,oe);le+=$e*Oe}}U.set(le,J,ce,ue,oe)}return n.makeTensorInfo(U.shape,U.dtype,U.values)}const D4={kernelName:ad,backendName:"cpu",kernelFunc:E4};function k4(e,t,n,s,i){const o=Ot(t),a=cS(e,t,n,o,i,"max"),c=V0(e,t,n,i,!0,s);return[a.values,c.values]}const F4={kernelName:cd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,u=n;Te(s,"MaxPoolWithArgmax");const p=u.data.get(s.dataId).values,m=Wn(s.shape,i,o,[1,1],a),[y,b]=k4(p,s.shape,s.dtype,c,m),w=u.write(y,m.outShape,s.dtype),I=u.write(b,m.outShape,s.dtype);return[{dataId:w,shape:m.outShape,dtype:s.dtype},{dataId:I,shape:m.outShape,dtype:"int32"}]}};const _4=Lp,W4={kernelName:hd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:u}=n,p=t;Te(s,"NonMaxSuppressionPadded");const m=p.data.get(s.dataId).values,y=p.data.get(i.dataId).values,{selectedIndices:b,validOutputs:w}=_4(m,y,o,a,c,u);return[b,w]}};const $4=Sp,U4={kernelName:ud,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=n,p=t;Te(s,"NonMaxSuppressionWithScore");const m=p.data.get(s.dataId).values,y=p.data.get(i.dataId).values,b=o,w=a,I=c,T=u,{selectedIndices:v,selectedScores:N}=$4(m,y,b,w,I,T);return[v,N]}};const B4=jo((e,t)=>e!==t?1:0),M4=dc(ld,B4,null,"bool"),P4={kernelName:ld,backendName:"cpu",kernelFunc:M4};function z4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,constantValue:a}=s;Te(i,"pad");const c=o.map((E,D)=>E[0]+i.shape[D]+E[1]),u=o.map(E=>E[0]),p=n.data.get(i.dataId).values,m=we(i.shape),y=i.shape.length,b=Ot(i.shape),w=we(c),I=c.length,T=Ot(c),v=wn(i.dtype,w);a!==0&&v.fill(a);for(let E=0;EB+u[U]),_=ti(F,I,T);v[_]=p[E]}const N=n.write(v,c,i.dtype);return{dataId:N,shape:c,dtype:i.dtype}}const Y0={kernelName:dd,backendName:"cpu",kernelFunc:z4};const G4=Tt(Ol,e=>1/e),V4={kernelName:Ol,backendName:"cpu",kernelFunc:G4};const H4={kernelName:bd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,u=wn(s.dtype,we(s.shape)),[p,m,y,b]=s.shape,[w,I]=qb(a,m,y),T=255,v=Math.sin(i),N=Math.cos(i),E=c.data.get(s.dataId).values;for(let F=0;F=0&&he=0&&pe{const t=Math.floor(e);return 
e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2===0?t:t+1}),q4={kernelName:Dl,backendName:"cpu",kernelFunc:Y4};const j4=xp,K4=Tp,X4=Tt(Fl,e=>e>=0?K4*e:j4*(Math.exp(e)-1)),J4={kernelName:Fl,backendName:"cpu",kernelFunc:X4};const Z4=Tt($l,e=>1/(1+Math.exp(-e))),Q4={kernelName:$l,backendName:"cpu",kernelFunc:Z4};const ej=Tt(Wl,e=>e<0?-1:e>0?1:0),tj={kernelName:Wl,backendName:"cpu",kernelFunc:ej};const nj=Tt(va,e=>Math.sin(e)),sj={kernelName:va,backendName:"cpu",kernelFunc:nj};const ij=Tt(_l,e=>Math.sinh(e)),rj={kernelName:_l,backendName:"cpu",kernelFunc:ij};const oj=11920928955078125e-23,q0=Math.log(oj)+2,aj=Tt(Ul,e=>{const t=e>-q0,n=eMath.sqrt(e)),pj={kernelName:Bl,backendName:"cpu",kernelFunc:dj};const mj={kernelName:fd,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;Te(n,"square");const i=s.data.get(n.dataId).values,o=new Float32Array(i.length);for(let c=0;c{const n=e-t;return n*n}),gj=dc(Na,fj),yj={kernelName:Na,backendName:"cpu",kernelFunc:gj};const bj=Tt(Gl,(e,t)=>{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}),wj={kernelName:Gl,backendName:"cpu",kernelFunc:bj};const Lj=Tt(Ca,e=>Math.tan(e)),Sj={kernelName:Ca,backendName:"cpu",kernelFunc:Lj};const Ij=Tt(Pl,e=>Math.tanh(e)),xj={kernelName:Pl,backendName:"cpu",kernelFunc:Ij};function Tj(e){const{inputs:t,attrs:n,backend:s}=e,{axis:i}=n,{x:o}=t;Te(o,"unique");const a=s.data.get(o.dataId).values,{outputValues:c,outputShape:u,indices:p}=G0(a,i,o.shape,o.dtype);return[s.makeTensorInfo(u,o.dtype,c),s.makeTensorInfo([p.length],"int32",p)]}const Aj={kernelName:gd,backendName:"cpu",kernelFunc:Tj};const vj=[XY,xq,Aq,nq,Nq,Rq,Eq,kq,_q,$q,Bq,eq,iq,Pq,JY,Vq,Yq,jq,Kq,Jq,Xq,lS,t4,l4,oq,cq,m4,f4,hq,ZY,y4,zq,w4,S4,x4,dq,A4,N4,O4,D4,F4,C4,mq,W4,U4,P4,Y0,QY,V4,Gq,H4,q4,gq,J4,Q4,tj,sj,rj,yq,cj,uj,pj,mj,yj,wj,wq,Sj,xj,lj,Aj];for(const e of vj)Ld(e);const Ko={},uS={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function Nj(e,t){Ko[e]=t}function Mi(e){if(!(e in Ko)){const n=Rj(e);if(n!==null)Ko[e]=n;else return console.log("Could not get context for WebGL version",e),null}const t=Ko[e];return t.isContextLost()?(delete Ko[e],Mi(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),Ko[e])}function Cj(e){if(typeof OffscreenCanvas!="undefined"&&e===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function Rj(e){if(e!==1&&e!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=Cj(e);return t.addEventListener("webglcontextlost",n=>{n.preventDefault(),delete Ko[e]},!1),e===1?t.getContext("webgl",uS)||t.getContext("experimental-webgl",uS):t.getContext("webgl2",uS)}var au;(function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"})(au||(au={}));var Cs;(function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"})(Cs||(Cs={}));var In;(function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(In||(In={}));function cu(e,t){return[t,e]}function Oj(e,t){return e*t}function nte(e,t){return[t*4,e]}function lu(e){const 
t=we(e),n=Math.ceil(t/4);return Sd(n)}function ste(e,t){if(e%t!==0)throw new Error(`unpackedSize (${e}) must be a multiple of ${t}`);return e/t}function ite(e,t,n){const s=e.length*n/4;if(t.length= ${s}`);let i=0;for(let o=0;oe.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function $j(e,t){const n=ur(e,()=>e.createShader(e.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}function Uj(e,t){const n=ur(e,()=>e.createShader(e.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw Mj(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}const Bj=/ERROR: [0-9]+:([0-9]+):/g;function Mj(e,t){const n=Bj.exec(t);if(n==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(e);return}const s=+n[1],i=e.split(` +`),o=i.length.toString().length+2,a=i.map((y,b)=>Lo((b+1).toString(),o)+y);let c=0;for(let y=0;ye.createProgram(),"Unable to create WebGLProgram.")}function zj(e,t){if(Re(e,()=>e.linkProgram(t)),e.getProgramParameter(t,e.LINK_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function pS(e,t){if(Re(e,()=>e.validateProgram(t)),e.getProgramParameter(t,e.VALIDATE_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function Gj(e,t){const n=ur(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function Vj(e,t){const n=ur(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function rte(){return C().getNumber("WEBGL_VERSION")===2?1:4}function Hj(e){return ur(e,()=>e.createTexture(),"Unable to create WebGLTexture.")}function Yj(e,t){const n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0){const s=`[${e}x${t}]`;throw new Error("Requested texture size "+s+" is invalid.")}if(e>n||t>n){const s=`[${e}x${t}]`,i=`[${n}x${n}]`;throw new Error("Requested texture size "+s+" greater than WebGL maximum on this browser / GPU "+i+".")}}function qj(e){return ur(e,()=>e.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function K0(e,t,n,s,i,o,a){const c=e.getAttribLocation(t,n);return c===-1?!1:(Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,s)),Re(e,()=>e.vertexAttribPointer(c,i,e.FLOAT,!1,o,a)),Re(e,()=>e.enableVertexAttribArray(c)),!0)}function jj(e,t,n){J0(e,n),Re(e,()=>e.activeTexture(e.TEXTURE0+n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,t))}function ote(e,t){J0(e,t),Re(e,()=>e.activeTexture(e.TEXTURE0+t)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function Kj(e,t,n){return ur(e,()=>e.getUniformLocation(t,n),'uniform "'+n+'" not present in program.')}function Xj(e,t,n){return e.getUniformLocation(t,n)}function Jj(e,t,n,s){Re(e,()=>jj(e,t,s)),Re(e,()=>e.uniform1i(n,s))}function ate(e){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.viewport(0,0,e.canvas.width,e.canvas.height)),Re(e,()=>e.scissor(0,0,e.canvas.width,e.canvas.height))}function 
mS(e,t,n){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,n)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0))}function X0(e,t){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,t)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0))}function mm(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+Zj(e,t))}function Zj(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function ur(e,t,n){const s=Re(e,()=>t());if(s==null)throw new Error(n);return s}function J0(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(sn){const i=`[gl.TEXTURE0, gl.TEXTURE${n}]`;throw new Error(`textureUnit must be in ${i}.`)}}function mc(e,t=2){return we(e.slice(0,e.length-t))}function fc(e){if(e.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function fS(e){let t=[1,1,1];const n=e.length===0||e.length===1&&e[0]===1;return n||(t=[mc(e),...fc(e)]),t}function Qj(e,t=!1){let n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n=n*2,e=e.map((i,o)=>o>=e.length-2?Sy(e[o]):e[o]),e.length===1&&(e=[2,e[0]])),e.length!==2){const i=Rr(e);e=i.newShape}let s=we(e);if(e.length<=1&&s<=n)return[1,s];if(e.length===2&&e[0]<=n&&e[1]<=n)return e;if(e.length===3&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(e.length===3&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(e.length===4&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(e.length===4&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const i=mc(e);let o=2,a=2;return e.length&&([o,a]=fc(e)),s=i*(o/2)*(a/2),Sd(s).map(c=>c*2)}return Sd(s)}function fm(e){return e%2===0}function gm(e,t){if(e=e.slice(-2),t=t.slice(-2),ot(e,t))return!0;if(!e.length||!t.length)return!0;if(e[0]===0||e[1]===0||t[0]===0||t[1]===0)return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(fm(n)&&fm(s)&&(e[0]===1||t[0]===1))return!0}return e[1]===t[1]&&fm(e[0])&&fm(t[0])}let ym,bm;function eK(e){if(ym==null){const t=Mi(e);ym=t.getParameter(t.MAX_TEXTURE_SIZE)}return ym}function cte(){ym=null}function lte(){bm=null}function tK(e){if(bm==null){const t=Mi(e);bm=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,bm)}function nK(e){if(e===0)return 0;let t;const n=Mi(e);return Vs(n,"EXT_disjoint_timer_query_webgl2")&&e===2?t=2:Vs(n,"EXT_disjoint_timer_query")?t=1:t=0,t}function Vs(e,t){const n=e.getExtension(t);return n!=null}function Z0(e){try{const t=Mi(e);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function sK(e){if(e===0)return!1;const t=Mi(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1}else if(!Vs(t,"EXT_color_buffer_float"))return!1;const n=gS(t);return n}function iK(e){if(e===0)return!1;const t=Mi(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1;if(!Vs(t,"WEBGL_color_buffer_float"))return!1}else{if(Vs(t,"EXT_color_buffer_float"))return gS(t);const s="EXT_color_buffer_half_float";if(Vs(t,s)){const i=t.getExtension(s);return rK(t,i)}return!1}const n=gS(t);return n}function gS(e){const 
t=dS(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n);const s=1,i=1;e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,s,i,0,t.textureFormatFloat,t.textureTypeFloat,null);const o=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,o),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(o),a}function rK(e,t){const n=dS(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s);const i=1,o=1;e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,i,o,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const c=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),c}function oK(e){if(e!==2)return!1;const t=Mi(e),n=t.fenceSync!=null;return n}function hu(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}const Ve=C();Ve.registerFlag("HAS_WEBGL",()=>Ve.getNumber("WEBGL_VERSION")>0),Ve.registerFlag("WEBGL_VERSION",()=>Z0(2)?2:Z0(1)?1:0),Ve.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),Ve.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>Ve.get("WEBGL_VERSION")===2),Ve.registerFlag("WEBGL_CPU_FORWARD",()=>!0),Ve.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),Ve.registerFlag("WEBGL_PACK",()=>Ve.getBool("HAS_WEBGL")),Ve.registerFlag("WEBGL_PACK_NORMALIZATION",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_CLIP",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),Ve.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_REDUCE",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_LAZILY_UNPACK",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_CONV_IM2COL",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>eK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>tK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{const e=Ve.getNumber("WEBGL_VERSION");return e===0?0:nK(e)}),Ve.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>Ve.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!wT()),Ve.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>sK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>Ve.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:Ve.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),Ve.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>iK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_FENCE_API_ENABLED",()=>oK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{const e=Ve.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return e?4:0}),Ve.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,e=>{if(e<0&&e!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got 
${e}.`)});const{simpleAbsImpl:aK,addImpl:cK,ceilImpl:lK,expImpl:hK,expm1Impl:uK,floorImpl:dK,logImpl:pK,maxImpl:mK,multiplyImpl:fK,rsqrtImpl:gK,sliceImpl:yK,subImpl:bK,transposeImpl:Q0,uniqueImpl:wK}=Lq;class LK{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`float v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` +`)[0]),console.log(`%c ${Lo(p[0],c)}`,"border:1px solid red; background-color:#e3d2d2; color:#a61717"),console.log(m.join(` +`))}function Pj(e){return ur(e,()=>e.createProgram(),"Unable to create WebGLProgram.")}function zj(e,t){if(Re(e,()=>e.linkProgram(t)),e.getProgramParameter(t,e.LINK_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function pS(e,t){if(Re(e,()=>e.validateProgram(t)),e.getProgramParameter(t,e.VALIDATE_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function Gj(e,t){const n=ur(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function Vj(e,t){const n=ur(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function rte(){return C().getNumber("WEBGL_VERSION")===2?1:4}function Hj(e){return ur(e,()=>e.createTexture(),"Unable to create WebGLTexture.")}function Yj(e,t){const n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0){const s=`[${e}x${t}]`;throw new Error("Requested texture size "+s+" is invalid.")}if(e>n||t>n){const s=`[${e}x${t}]`,i=`[${n}x${n}]`;throw new Error("Requested texture size "+s+" greater than WebGL maximum on this browser / GPU "+i+".")}}function qj(e){return ur(e,()=>e.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function K0(e,t,n,s,i,o,a){const c=e.getAttribLocation(t,n);return c===-1?!1:(Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,s)),Re(e,()=>e.vertexAttribPointer(c,i,e.FLOAT,!1,o,a)),Re(e,()=>e.enableVertexAttribArray(c)),!0)}function jj(e,t,n){J0(e,n),Re(e,()=>e.activeTexture(e.TEXTURE0+n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,t))}function ote(e,t){J0(e,t),Re(e,()=>e.activeTexture(e.TEXTURE0+t)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function Kj(e,t,n){return ur(e,()=>e.getUniformLocation(t,n),'uniform "'+n+'" not present in program.')}function Xj(e,t,n){return e.getUniformLocation(t,n)}function Jj(e,t,n,s){Re(e,()=>jj(e,t,s)),Re(e,()=>e.uniform1i(n,s))}function ate(e){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.viewport(0,0,e.canvas.width,e.canvas.height)),Re(e,()=>e.scissor(0,0,e.canvas.width,e.canvas.height))}function mS(e,t,n){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,n)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0))}function X0(e,t){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,t)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0))}function mm(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+Zj(e,t))}function Zj(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case 
e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function ur(e,t,n){const s=Re(e,()=>t());if(s==null)throw new Error(n);return s}function J0(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(sn){const i=`[gl.TEXTURE0, gl.TEXTURE${n}]`;throw new Error(`textureUnit must be in ${i}.`)}}function gc(e,t=2){return we(e.slice(0,e.length-t))}function yc(e){if(e.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function fS(e){let t=[1,1,1];const n=e.length===0||e.length===1&&e[0]===1;return n||(t=[gc(e),...yc(e)]),t}function Qj(e,t=!1){let n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n=n*2,e=e.map((i,o)=>o>=e.length-2?Sy(e[o]):e[o]),e.length===1&&(e=[2,e[0]])),e.length!==2){const i=Rr(e);e=i.newShape}let s=we(e);if(e.length<=1&&s<=n)return[1,s];if(e.length===2&&e[0]<=n&&e[1]<=n)return e;if(e.length===3&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(e.length===3&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(e.length===4&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(e.length===4&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const i=gc(e);let o=2,a=2;return e.length&&([o,a]=yc(e)),s=i*(o/2)*(a/2),Sd(s).map(c=>c*2)}return Sd(s)}function fm(e){return e%2===0}function gm(e,t){if(e=e.slice(-2),t=t.slice(-2),ot(e,t))return!0;if(!e.length||!t.length)return!0;if(e[0]===0||e[1]===0||t[0]===0||t[1]===0)return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(fm(n)&&fm(s)&&(e[0]===1||t[0]===1))return!0}return e[1]===t[1]&&fm(e[0])&&fm(t[0])}let ym,bm;function eK(e){if(ym==null){const t=Mi(e);ym=t.getParameter(t.MAX_TEXTURE_SIZE)}return ym}function cte(){ym=null}function lte(){bm=null}function tK(e){if(bm==null){const t=Mi(e);bm=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,bm)}function nK(e){if(e===0)return 0;let t;const n=Mi(e);return Vs(n,"EXT_disjoint_timer_query_webgl2")&&e===2?t=2:Vs(n,"EXT_disjoint_timer_query")?t=1:t=0,t}function Vs(e,t){const n=e.getExtension(t);return n!=null}function Z0(e){try{const t=Mi(e);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function sK(e){if(e===0)return!1;const t=Mi(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1}else if(!Vs(t,"EXT_color_buffer_float"))return!1;const n=gS(t);return n}function iK(e){if(e===0)return!1;const t=Mi(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1;if(!Vs(t,"WEBGL_color_buffer_float"))return!1}else{if(Vs(t,"EXT_color_buffer_float"))return gS(t);const s="EXT_color_buffer_half_float";if(Vs(t,s)){const i=t.getExtension(s);return rK(t,i)}return!1}const n=gS(t);return n}function gS(e){const t=dS(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n);const s=1,i=1;e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,s,i,0,t.textureFormatFloat,t.textureTypeFloat,null);const o=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,o),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(o),a}function rK(e,t){const n=dS(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s);const 
i=1,o=1;e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,i,o,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const c=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),c}function oK(e){if(e!==2)return!1;const t=Mi(e),n=t.fenceSync!=null;return n}function hu(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}const Ve=C();Ve.registerFlag("HAS_WEBGL",()=>Ve.getNumber("WEBGL_VERSION")>0),Ve.registerFlag("WEBGL_VERSION",()=>Z0(2)?2:Z0(1)?1:0),Ve.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),Ve.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>Ve.get("WEBGL_VERSION")===2),Ve.registerFlag("WEBGL_CPU_FORWARD",()=>!0),Ve.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),Ve.registerFlag("WEBGL_PACK",()=>Ve.getBool("HAS_WEBGL")),Ve.registerFlag("WEBGL_PACK_NORMALIZATION",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_CLIP",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),Ve.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_PACK_REDUCE",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_LAZILY_UNPACK",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_CONV_IM2COL",()=>Ve.getBool("WEBGL_PACK")),Ve.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>eK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>tK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{const e=Ve.getNumber("WEBGL_VERSION");return e===0?0:nK(e)}),Ve.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>Ve.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!wT()),Ve.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>sK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>Ve.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:Ve.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),Ve.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>iK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_FENCE_API_ENABLED",()=>oK(Ve.getNumber("WEBGL_VERSION"))),Ve.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{const e=Ve.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return e?4:0}),Ve.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,e=>{if(e<0&&e!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${e}.`)});const{simpleAbsImpl:aK,addImpl:cK,ceilImpl:lK,expImpl:hK,expm1Impl:uK,floorImpl:dK,logImpl:pK,maxImpl:mK,multiplyImpl:fK,rsqrtImpl:gK,sliceImpl:yK,subImpl:bK,transposeImpl:Q0,uniqueImpl:wK}=Lq;class LK{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`float v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` void main() { ${n.join(` `)} @@ -114,7 +114,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi ivec4 round(vec4 value) { return ivec4(floor(value + vec4(0.5))); } - `),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:i,output:o,defineOutput:a,defineSpecialNaN:c,defineSpecialInf:u,defineRound:p}}function jo(e,t,n="index"){const s=Ot(t);return s.map((i,o)=>{const a=`int ${e[o]} = ${n} / ${i}`,c=o===s.length-1?`int ${e[o+1]} = ${n} - ${e[o]} * ${i}`:`index -= ${e[o]} * ${i}`;return`${a}; ${c};`}).join("")}function wm(e){return e.length===1?`${e[0]}`:`vec${e.length}(${e.join(",")})`}function hte(e,t){if(e.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${e.length} and ${t.length}`);const n=[],s=Math.floor(e.length/4),i=e.length%4;for(let o=0;o`float(${c})`),a=a.map(c=>`float(${c})`)),n.push(`${wm(o)}, ${wm(a)}`)}return n.map((o,a)=>`dot(${o})`).join("+")}function yS(e){const t=Ot(e).map(n=>n.toString());return` + `),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:i,output:o,defineOutput:a,defineSpecialNaN:c,defineSpecialInf:u,defineRound:p}}function Xo(e,t,n="index"){const s=Ot(t);return s.map((i,o)=>{const a=`int ${e[o]} = ${n} / ${i}`,c=o===s.length-1?`int ${e[o+1]} = ${n} - ${e[o]} * ${i}`:`index -= ${e[o]} * ${i}`;return`${a}; ${c};`}).join("")}function wm(e){return e.length===1?`${e[0]}`:`vec${e.length}(${e.join(",")})`}function hte(e,t){if(e.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${e.length} and ${t.length}`);const n=[],s=Math.floor(e.length/4),i=e.length%4;for(let o=0;o`float(${c})`),a=a.map(c=>`float(${c})`)),n.push(`${wm(o)}, ${wm(a)}`)}return n.map((o,a)=>`dot(${o})`).join("+")}function yS(e){const t=Ot(e).map(n=>n.toString());return` int getFlatIndex(ivec3 coords) { return coords.x * ${t[0]} + coords.y * ${t[1]} + coords.z; } @@ -160,7 +160,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi `;const{getBroadcastDims:nC}=iw;function TK(e,t,n,s){const i=[];e.forEach(I=>{const T=we(I.shapeInfo.logicalShape);I.shapeInfo.isUniform?i.push(`uniform float ${I.name}${T>1?`[${T}]`:""};`):(i.push(`uniform sampler2D ${I.name};`),i.push(`uniform int offset${I.name};`))});const o=i.join(` `),a=e.map(I=>AK(I,t,s)).join(` `),c=t.texShape,u=Un(),p=CK(u);let m,y,b=EK(u);t.isPacked?(m=vK(t.logicalShape,c),y=OK(u)):(m=NK(t.logicalShape,c),y=RK(u)),s&&(b+=_K);const w=[b,p,y,o,m,a,n].join(` -`);return w}function gc(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return qK(e);case 1:return KK(e);case 2:return JK(e);case 3:return QK(e);case 4:return t5(e);case 5:return n5(e);case 6:return s5(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function sC(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return YK(e);case 1:return jK(e);case 2:return XK(e);case 3:return ZK(e);default:return e5(e)}}function AK(e,t,n=!1){let s="";n?s+=sC(e):s+=gc(e);const i=e.shapeInfo.logicalShape,o=t.logicalShape;return i.length<=o.length&&(n?s+=i5(e,t):s+=r5(e,t)),s}function vK(e,t){switch(e.length){case 0:return iC();case 1:return WK(e,t);case 2:return VK(e,t);case 3:return UK(e,t);default:return MK(e,t)}}function NK(e,t){switch(e.length){case 0:return iC();case 1:return $K(e,t);case 2:return HK(e,t);case 3:return BK(e,t);case 4:return PK(e,t);case 5:return zK(e,t);case 6:return GK(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}}function CK(e){return` +`);return w}function bc(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return qK(e);case 1:return KK(e);case 2:return JK(e);case 3:return QK(e);case 4:return t5(e);case 5:return n5(e);case 6:return s5(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function sC(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return YK(e);case 1:return jK(e);case 2:return XK(e);case 3:return ZK(e);default:return e5(e)}}function AK(e,t,n=!1){let s="";n?s+=sC(e):s+=bc(e);const i=e.shapeInfo.logicalShape,o=t.logicalShape;return i.length<=o.length&&(n?s+=i5(e,t):s+=r5(e,t)),s}function vK(e,t){switch(e.length){case 0:return iC();case 1:return WK(e,t);case 2:return VK(e,t);case 3:return UK(e,t);default:return MK(e,t)}}function NK(e,t){switch(e.length){case 0:return iC();case 1:return $K(e,t);case 2:return HK(e,t);case 3:return BK(e,t);case 4:return PK(e,t);case 5:return zK(e,t);case 6:return GK(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}}function CK(e){return` float sampleTexture(sampler2D textureSampler, vec2 uv) { return ${e.texture2D}(textureSampler, uv).r; } @@ -316,7 +316,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, return ivec3(b, r, c); } - `}function BK(e,t){const n=jo(["r","c","d"],e);return` + `}function BK(e,t){const n=Xo(["r","c","d"],e);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); @@ -343,7 +343,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, return ivec${e.length}(${c}); } - `}function PK(e,t){const n=jo(["r","c","d","d2"],e);return` + `}function PK(e,t){const n=Xo(["r","c","d","d2"],e);return` ivec4 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); @@ -351,7 +351,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, ${n} return ivec4(r, c, d, d2); } - `}function zK(e,t){const n=jo(["r","c","d","d2","d3"],e);return` + `}function zK(e,t){const n=Xo(["r","c","d","d2","d3"],e);return` ivec5 
getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); @@ -363,7 +363,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, ivec5 outShape = ivec5(r, c, d, d2, d3); return outShape; } - `}function GK(e,t){const n=jo(["r","c","d","d2","d3","d4"],e);return` + `}function GK(e,t){const n=Xo(["r","c","d","d2","d3","d4"],e);return` ivec6 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); @@ -416,7 +416,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, int c = index - r * ${e[1]}; return ivec2(r, c); } - `}function Ko(e){return`offset${e}`}function YK(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1),s=Un();return` + `}function Jo(e){return`offset${e}`}function YK(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1),s=Un();return` vec4 ${n}() { return ${s.texture2D}(${t}, halfCR); } @@ -424,7 +424,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, float ${n}() { return sampleTexture(${t}, halfCR); } - `;const[o,a]=e.shapeInfo.texShape,c=Ko(t);return` + `;const[o,a]=e.shapeInfo.texShape,c=Jo(t);return` float ${n}() { vec2 uv = uvFromFlat(${o}, ${a}, ${c}); return sampleTexture(${t}, uv); @@ -437,13 +437,13 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } `}function KK(e){const t=e.name,n="get"+t.charAt(0).toUpperCase()+t.slice(1);if(e.shapeInfo.isUniform)return` float ${n}(int index) { - ${yc(e)} + ${wc(e)} } `;const s=e.shapeInfo.texShape,i=s[0],o=s[1];if(o===1&&i===1)return` float ${n}(int index) { return sampleTexture(${t}, halfCR); } - `;const a=Ko(t);return o===1?` + `;const a=Jo(t);return o===1?` float ${n}(int index) { vec2 uv = vec2(0.5, (float(index + ${a}) + 0.5) / ${i}.0); return sampleTexture(${t}, uv); @@ -474,17 +474,17 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, vec2 uv = (vec2(col, row) + halfCR) / vec2(${b}.0, ${y}.0); return sampleTexture(${n}, uv); } - `}const{newShape:o,keptDims:a}=Rr(t),c=o;if(c.lengthe[n]).join(", ")}class o5{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,k(e.length>2,()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`);const i=e[e.length-1],o=Math.ceil(i/t);this.outputShape=e.slice(0,-1),o>1&&this.outputShape.push(o),s||this.variableNames.push("bestIndicesA");const a=this.outputShape,c=a.length,u=Et(c),p=us("coords",c);let m,y;if(o===1){y=c+1;const U=Et(y);m=` + `}function Et(e){if(e<=1)return"int";if(e===2)return"ivec2";if(e===3)return"ivec3";if(e===4)return"ivec4";if(e===5)return"ivec5";if(e===6)return"ivec6";throw Error(`GPU for rank ${e} is not yet supported`)}function Lc(e,t){const n=JSON.parse(JSON.stringify(e));return n.shapeInfo.logicalShape=t,n}function Sc(e,t){return t.map(n=>e[n]).join(", ")}class o5{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,k(e.length>2,()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`);const i=e[e.length-1],o=Math.ceil(i/t);this.outputShape=e.slice(0,-1),o>1&&this.outputShape.push(o),s||this.variableNames.push("bestIndicesA");const a=this.outputShape,c=a.length,u=Et(c),p=us("coords",c);let m,y;if(o===1){y=c+1;const U=Et(y);m=` ${U} sourceLocR = ${U}(${p.join()}, 0); ++${p[c-1]}; ${U} sourceLocG = ${U}(${p.join()}, 0); @@ -915,7 +915,7 @@ return (round(mod(b, 2.0)) != 1) ? `,L5=aC+` return min(a, b); `,S5=`if (b == 0.0) return NAN; - return mod(a, b);`,I5="return (b >= 1.0) ? a : a * (b + 1.0);",lC="return (a < 0.) ? 
b * a : a;";class hn{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=nt(t,n),this.userCode=` + return mod(a, b);`,I5="return (b >= 1.0) ? a : a * (b + 1.0);",lC="return (a < 0.) ? b * a : a;";class un{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=nt(t,n),this.userCode=` float binaryOperation(float a, float b) { ${e} } @@ -1900,7 +1900,7 @@ return (round(mod(b, 2.0)) != 1) ? } `}getCustomSetupFunc(e){return(t,n)=>{this.index==null&&(this.index=t.getUniformLocation(n,"index")),t.gl.uniform1f(this.index,e)}}}function fC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.x, ${t}.y`;if(e===3)return`${t}.x, ${t}.y, ${t}.z`;if(e===4)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}function gC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.y`;if(e===3)return`${t}.z`;if(e===4)return`${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}class X5{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=au.DENSE;const t=lu(e),n=Un();this.outputShape=e,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${jo(["r","c","d"],e)} + ${Xo(["r","c","d"],e)} return ivec3(r, c, d); } @@ -1921,7 +1921,7 @@ return (round(mod(b, 2.0)) != 1) ? } `}}class J5{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=au.DENSE;const t=lu(e),n=Un();this.outputShape=e,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${jo(["r","c","d"],e)} + ${Xo(["r","c","d"],e)} return ivec3(r, c, d); } @@ -2118,7 +2118,7 @@ return (round(mod(b, 2.0)) != 1) ? void main() { gl_Position = vec4(clipSpacePos, 1); resultUV = uv; - }`;return $j(e,n)}function l8(e){const t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return Gj(e,t)}function h8(e){const t=new Uint16Array([0,1,2,2,1,3]);return Vj(e,t)}function uu(e,t,n,s,i,o){Yj(t,n);const a=Hj(e),c=e.TEXTURE_2D;return Re(e,()=>e.bindTexture(c,a)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MIN_FILTER,e.NEAREST)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MAG_FILTER,e.NEAREST)),Re(e,()=>e.texImage2D(c,0,s,t,n,0,i,o,null)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null)),a}function wC(e){return e.internalFormatFloat}function u8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,wC(s),s.textureFormatFloat,e.FLOAT)}function LC(e){return e.internalFormatHalfFloat}function d8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,LC(s),s.textureFormatFloat,s.textureTypeHalfFloat)}function SC(e){return e.downloadTextureFormat}function p8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,SC(s),e.RGBA,e.UNSIGNED_BYTE)}function IC(e){return e.internalFormatPackedFloat}function m8(e,t,n,s){const[i,o]=pc(t,n);return uu(e,i,o,IC(s),e.RGBA,e.FLOAT)}function xC(e){return e.internalFormatPackedHalfFloat}function f8(e,t,n,s){const[i,o]=pc(t,n);return uu(e,i,o,xC(s),e.RGBA,s.textureTypeHalfFloat)}function g8(e,t,n){const s=0,i=3*4,o=3*4+2*4;Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n));const a=K0(e,t,"clipSpacePos",n,3,o,s);return a&&K0(e,t,"uv",n,2,o,i)}function y8(e,t,n,s,i,o){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t));let a,c,u;i instanceof Uint8Array?(a=new Uint8Array(n*s*4),c=e.UNSIGNED_BYTE,u=e.RGBA):(a=new Float32Array(n*s*4),c=e.FLOAT,u=o.internalFormatPackedFloat),a.set(i),Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,u,n,s,0,e.RGBA,c,a)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function 
b8(e,t,n){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t)),n.data instanceof Uint8Array?Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data)):Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function w8(e,t,n,s){const i=e.createBuffer();Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,i));const o=4,a=4,c=o*a*t*n;return Re(e,()=>e.bufferData(e.PIXEL_PACK_BUFFER,c,e.STREAM_READ)),Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0)),Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null)),i}function L8(e,t,n){const s=e,i=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,i),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),i}function S8(e,t,n,s){const[i,o]=cu(t,n),a=4,c=new Uint8Array(Oj(t*n,a));return Re(e,()=>e.readPixels(0,0,i,o,s.downloadTextureFormat,e.UNSIGNED_BYTE,c)),new Float32Array(c.buffer)}function I8(e,t,n,s,i,o,a,c){const u=e,p=new Float32Array(Ej(o,a));return u.bindBuffer(u.PIXEL_PACK_BUFFER,t),u.getBufferSubData(u.PIXEL_PACK_BUFFER,0,p),u.bindBuffer(u.PIXEL_PACK_BUFFER,null),p}function x8(e,t,n){const s=new Float32Array(t*n*4);return Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s)),s}class T8{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const t=C().getNumber("WEBGL_VERSION");e!=null?(this.gl=e,Nj(t,e)):this.gl=Mi(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(C().getNumber("WEBGL_VERSION")===1){const i="OES_texture_float",o="OES_texture_half_float";if(this.textureFloatExtension=pm(this.gl,i),Vs(this.gl,o))this.textureHalfFloatExtension=pm(this.gl,o);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),Vs(this.gl,s))this.colorBufferHalfFloatExtension=pm(this.gl,s);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",Vs(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else if(Vs(this.gl,s))this.colorBufferHalfFloatExtension=this.gl.getExtension(s);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=l8(this.gl),this.indexBuffer=h8(this.gl),this.framebuffer=qj(this.gl),this.textureConfig=dS(this.gl,this.textureHalfFloatExtension)}get debug(){return C().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. 
This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;Re(e,()=>e.finish()),Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.deleteFramebuffer(this.framebuffer)),Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,null)),Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null)),Re(e,()=>e.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),u8(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),d8(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),p8(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),b8(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),y8(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),f8(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),m8(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(X0(this.gl,this.framebuffer),this.outputTexture=null),Re(this.gl,()=>this.gl.deleteTexture(e))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,()=>S8(this.gl,t,n,this.textureConfig))}downloadPackedMatrixFromBuffer(e,t,n,s,i,o){return I8(this.gl,e,t,n,s,i,o,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return L8(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=w8(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(C().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,i=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const o=s.clientWaitSync(i,0,0);return o===s.ALREADY_SIGNALED||o===s.CONDITION_SATISFIED},t=i}else C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,()=>x8(this.gl,t,n))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=Uj(t,e),s=c8(t),i=Pj(t);return Re(t,()=>t.attachShader(i,s)),Re(t,()=>t.attachShader(i,n)),zj(t,i),this.debug&&pS(t,i),this.vertexAttrsAreBound||(this.setProgram(i),this.vertexAttrsAreBound=g8(t,this.program,this.vertexBuffer)),i}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),e!=null&&Re(this.gl,()=>this.gl.deleteProgram(e))}setProgram(e){this.throwIfDisposed(),this.program=e,this.program!=null&&this.debug&&pS(this.gl,this.program),Re(this.gl,()=>this.gl.useProgram(e))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?Kj(this.gl,e,t):Xj(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),Re(this.gl,()=>this.gl.getAttribLocation(e,t))}getUniformLocationNoThrow(e,t){return 
this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),Jj(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,i]=pc(t,n);this.setOutputMatrixTextureDriver(e,s,i)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&pS(this.gl,this.program),mm(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),Re(e,()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Re(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=pm(this.gl,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.createQuery();return n.beginQuery(s.TIME_ELAPSED_EXT,i),i}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const t=this.gl,n=this.getQueryTimerExtensionWebGL2();t.endQuery(n.TIME_ELAPSED_EXT);return}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await Iy(()=>this.disposed||this.isQueryAvailable(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(t===0)return null;if(t===2){const n=this.gl,s=n.getQueryParameter(e,n.QUERY_RESULT);return s/1e6}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_EXT);return s/1e6}}isQueryAvailable(e,t){if(t===0)return!0;if(t===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.getQueryParameter(e,n.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(s.GPU_DISJOINT_EXT)),i&&!this.disjoint}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}}pollFence(e){return new Promise(t=>{this.addItemToPoll(()=>e.isFencePassed(),()=>t())})}pollItems(){const e=A8(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=e;++t){const{resolveFn:n}=this.itemsToPoll[t];n()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){if(this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1)return;Iy(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),mS(this.gl,e,this.framebuffer),this.debug&&mm(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(mS(this.gl,this.outputTexture,this.framebuffer),this.debug&&mm(this.gl)):X0(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return 
this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;mS(s,e,this.framebuffer),this.debug&&mm(s),this.outputTexture=e,Re(s,()=>s.viewport(0,0,t,n)),Re(s,()=>s.scissor(0,0,t,n))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),Re(this.gl,()=>this.gl.scissor(e,t,n,s))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function A8(e){let t=0;for(;t{const T={logicalShape:w.shape,texShape:w.isUniform?null:w.texData.texShape,isUniform:w.isUniform,isPacked:w.isUniform?!1:w.texData.isPacked,flatOffset:null};return w.texData!=null&&w.texData.slice!=null&&w.texData.slice.flatOffset>0&&(T.flatOffset=w.texData.slice.flatOffset),{name:t.variableNames[I],shapeInfo:T}}),a=o.map(w=>w.shapeInfo),c={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},u=TK(o,c,i,t.packedInputs),p=e.createProgram(u);let m=null;const y=e.getUniformLocation(p,"NAN",!1);C().getNumber("WEBGL_VERSION")===1&&(m=e.getUniformLocation(p,"INFINITY",!1));const b={};for(let w=0;w{const i=n.logicalShape,o=t[s],a=o.shape;if(!ot(i,a))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${i} and ${a} must match`);if(n.isUniform&&o.isUniform)return;const c=n.texShape,u=o.isUniform?null:o.texData.texShape;if(!ot(c,u))throw Error(`Binary was compiled with different texture shapes than the current args. Shape ${c} and ${u} must match`)})}function N8(e,t,n,s,i){TC(t.inShapeInfos,n),TC([t.outShapeInfo],[s]);const o=s.texData.texture,a=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(o,a[0],a[1]):e.setOutputMatrixTexture(o,a[0],a[1]),e.setProgram(t.webGLProgram),C().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&e.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach((c,u)=>{const p=t.program.variableNames[u],m=t.uniformLocations[p],y=t.uniformLocations[`offset${p}`];if(m==null)return;if(c.isUniform){if(we(c.shape)<2)e.gl.uniform1f(m,c.uniformValues[0]);else{let b=c.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),e.gl.uniform1fv(m,b)}return}c.texData.slice!=null&&y!=null&&e.gl.uniform1i(y,c.texData.slice.flatOffset),e.setInputMatrixTexture(c.texData.texture,m,u)}),i!=null&&i(e,t.webGLProgram),e.executeProgram()}function C8(e,t,n){let s="";t.concat(n).forEach(a=>{const c=a.texData!=null&&a.texData.slice!=null&&a.texData.slice.flatOffset>0,u=a.isUniform?"uniform":a.texData.texShape;s+=`${a.shape}_${u}_${c}`});const i=e.userCode;let o=e.constructor.name;return o+="_"+s+"_"+i,o}class R8{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:i,strideWidth:o,strideHeight:a,padInfo:c,outWidth:u,dilationWidth:p,dilationHeight:m,dataFormat:y}=n,{left:b,top:w}=c,I=i*s,T=Un(),v=y==="channelsLast",N=v?0:1,E=v?1:2;let D="";for(let F=0;F<=1;F++)for(let _=0;_<=1;_++)D+=` + }`;return $j(e,n)}function l8(e){const t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return Gj(e,t)}function h8(e){const t=new Uint16Array([0,1,2,2,1,3]);return Vj(e,t)}function uu(e,t,n,s,i,o){Yj(t,n);const a=Hj(e),c=e.TEXTURE_2D;return 
Re(e,()=>e.bindTexture(c,a)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MIN_FILTER,e.NEAREST)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MAG_FILTER,e.NEAREST)),Re(e,()=>e.texImage2D(c,0,s,t,n,0,i,o,null)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null)),a}function wC(e){return e.internalFormatFloat}function u8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,wC(s),s.textureFormatFloat,e.FLOAT)}function LC(e){return e.internalFormatHalfFloat}function d8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,LC(s),s.textureFormatFloat,s.textureTypeHalfFloat)}function SC(e){return e.downloadTextureFormat}function p8(e,t,n,s){const[i,o]=cu(t,n);return uu(e,i,o,SC(s),e.RGBA,e.UNSIGNED_BYTE)}function IC(e){return e.internalFormatPackedFloat}function m8(e,t,n,s){const[i,o]=fc(t,n);return uu(e,i,o,IC(s),e.RGBA,e.FLOAT)}function xC(e){return e.internalFormatPackedHalfFloat}function f8(e,t,n,s){const[i,o]=fc(t,n);return uu(e,i,o,xC(s),e.RGBA,s.textureTypeHalfFloat)}function g8(e,t,n){const s=0,i=3*4,o=3*4+2*4;Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n));const a=K0(e,t,"clipSpacePos",n,3,o,s);return a&&K0(e,t,"uv",n,2,o,i)}function y8(e,t,n,s,i,o){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t));let a,c,u;i instanceof Uint8Array?(a=new Uint8Array(n*s*4),c=e.UNSIGNED_BYTE,u=e.RGBA):(a=new Float32Array(n*s*4),c=e.FLOAT,u=o.internalFormatPackedFloat),a.set(i),Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,u,n,s,0,e.RGBA,c,a)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function b8(e,t,n){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t)),n.data instanceof Uint8Array?Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data)):Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function w8(e,t,n,s){const i=e.createBuffer();Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,i));const o=4,a=4,c=o*a*t*n;return Re(e,()=>e.bufferData(e.PIXEL_PACK_BUFFER,c,e.STREAM_READ)),Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0)),Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null)),i}function L8(e,t,n){const s=e,i=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,i),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),i}function S8(e,t,n,s){const[i,o]=cu(t,n),a=4,c=new Uint8Array(Oj(t*n,a));return Re(e,()=>e.readPixels(0,0,i,o,s.downloadTextureFormat,e.UNSIGNED_BYTE,c)),new Float32Array(c.buffer)}function I8(e,t,n,s,i,o,a,c){const u=e,p=new Float32Array(Ej(o,a));return u.bindBuffer(u.PIXEL_PACK_BUFFER,t),u.getBufferSubData(u.PIXEL_PACK_BUFFER,0,p),u.bindBuffer(u.PIXEL_PACK_BUFFER,null),p}function x8(e,t,n){const s=new Float32Array(t*n*4);return Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s)),s}class T8{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const t=C().getNumber("WEBGL_VERSION");e!=null?(this.gl=e,Nj(t,e)):this.gl=Mi(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(C().getNumber("WEBGL_VERSION")===1){const i="OES_texture_float",o="OES_texture_half_float";if(this.textureFloatExtension=pm(this.gl,i),Vs(this.gl,o))this.textureHalfFloatExtension=pm(this.gl,o);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to 
true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),Vs(this.gl,s))this.colorBufferHalfFloatExtension=pm(this.gl,s);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",Vs(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else if(Vs(this.gl,s))this.colorBufferHalfFloatExtension=this.gl.getExtension(s);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=l8(this.gl),this.indexBuffer=h8(this.gl),this.framebuffer=qj(this.gl),this.textureConfig=dS(this.gl,this.textureHalfFloatExtension)}get debug(){return C().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;Re(e,()=>e.finish()),Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.deleteFramebuffer(this.framebuffer)),Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,null)),Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null)),Re(e,()=>e.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),u8(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),d8(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),p8(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),b8(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),y8(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),f8(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),m8(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(X0(this.gl,this.framebuffer),this.outputTexture=null),Re(this.gl,()=>this.gl.deleteTexture(e))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,()=>S8(this.gl,t,n,this.textureConfig))}downloadPackedMatrixFromBuffer(e,t,n,s,i,o){return I8(this.gl,e,t,n,s,i,o,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return L8(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=w8(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(C().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,i=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const o=s.clientWaitSync(i,0,0);return o===s.ALREADY_SIGNALED||o===s.CONDITION_SATISFIED},t=i}else C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,()=>x8(this.gl,t,n))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=Uj(t,e),s=c8(t),i=Pj(t);return 
Re(t,()=>t.attachShader(i,s)),Re(t,()=>t.attachShader(i,n)),zj(t,i),this.debug&&pS(t,i),this.vertexAttrsAreBound||(this.setProgram(i),this.vertexAttrsAreBound=g8(t,this.program,this.vertexBuffer)),i}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),e!=null&&Re(this.gl,()=>this.gl.deleteProgram(e))}setProgram(e){this.throwIfDisposed(),this.program=e,this.program!=null&&this.debug&&pS(this.gl,this.program),Re(this.gl,()=>this.gl.useProgram(e))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?Kj(this.gl,e,t):Xj(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),Re(this.gl,()=>this.gl.getAttribLocation(e,t))}getUniformLocationNoThrow(e,t){return this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),Jj(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,i]=fc(t,n);this.setOutputMatrixTextureDriver(e,s,i)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&pS(this.gl,this.program),mm(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),Re(e,()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Re(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=pm(this.gl,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.createQuery();return n.beginQuery(s.TIME_ELAPSED_EXT,i),i}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const t=this.gl,n=this.getQueryTimerExtensionWebGL2();t.endQuery(n.TIME_ELAPSED_EXT);return}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await Iy(()=>this.disposed||this.isQueryAvailable(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(t===0)return null;if(t===2){const n=this.gl,s=n.getQueryParameter(e,n.QUERY_RESULT);return s/1e6}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_EXT);return s/1e6}}isQueryAvailable(e,t){if(t===0)return!0;if(t===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.getQueryParameter(e,n.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(s.GPU_DISJOINT_EXT)),i&&!this.disjoint}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}}pollFence(e){return new 
Promise(t=>{this.addItemToPoll(()=>e.isFencePassed(),()=>t())})}pollItems(){const e=A8(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=e;++t){const{resolveFn:n}=this.itemsToPoll[t];n()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){if(this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1)return;Iy(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),mS(this.gl,e,this.framebuffer),this.debug&&mm(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(mS(this.gl,this.outputTexture,this.framebuffer),this.debug&&mm(this.gl)):X0(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;mS(s,e,this.framebuffer),this.debug&&mm(s),this.outputTexture=e,Re(s,()=>s.viewport(0,0,t,n)),Re(s,()=>s.scissor(0,0,t,n))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),Re(this.gl,()=>this.gl.scissor(e,t,n,s))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function A8(e){let t=0;for(;t{const T={logicalShape:w.shape,texShape:w.isUniform?null:w.texData.texShape,isUniform:w.isUniform,isPacked:w.isUniform?!1:w.texData.isPacked,flatOffset:null};return w.texData!=null&&w.texData.slice!=null&&w.texData.slice.flatOffset>0&&(T.flatOffset=w.texData.slice.flatOffset),{name:t.variableNames[I],shapeInfo:T}}),a=o.map(w=>w.shapeInfo),c={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},u=TK(o,c,i,t.packedInputs),p=e.createProgram(u);let m=null;const y=e.getUniformLocation(p,"NAN",!1);C().getNumber("WEBGL_VERSION")===1&&(m=e.getUniformLocation(p,"INFINITY",!1));const b={};for(let w=0;w{const i=n.logicalShape,o=t[s],a=o.shape;if(!ot(i,a))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${i} and ${a} must match`);if(n.isUniform&&o.isUniform)return;const c=n.texShape,u=o.isUniform?null:o.texData.texShape;if(!ot(c,u))throw Error(`Binary was compiled with different texture shapes than the current args. 
Shape ${c} and ${u} must match`)})}function N8(e,t,n,s,i){TC(t.inShapeInfos,n),TC([t.outShapeInfo],[s]);const o=s.texData.texture,a=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(o,a[0],a[1]):e.setOutputMatrixTexture(o,a[0],a[1]),e.setProgram(t.webGLProgram),C().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&e.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach((c,u)=>{const p=t.program.variableNames[u],m=t.uniformLocations[p],y=t.uniformLocations[`offset${p}`];if(m==null)return;if(c.isUniform){if(we(c.shape)<2)e.gl.uniform1f(m,c.uniformValues[0]);else{let b=c.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),e.gl.uniform1fv(m,b)}return}c.texData.slice!=null&&y!=null&&e.gl.uniform1i(y,c.texData.slice.flatOffset),e.setInputMatrixTexture(c.texData.texture,m,u)}),i!=null&&i(e,t.webGLProgram),e.executeProgram()}function C8(e,t,n){let s="";t.concat(n).forEach(a=>{const c=a.texData!=null&&a.texData.slice!=null&&a.texData.slice.flatOffset>0,u=a.isUniform?"uniform":a.texData.texShape;s+=`${a.shape}_${u}_${c}`});const i=e.userCode;let o=e.constructor.name;return o+="_"+s+"_"+i,o}class R8{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:i,strideWidth:o,strideHeight:a,padInfo:c,outWidth:u,dilationWidth:p,dilationHeight:m,dataFormat:y}=n,{left:b,top:w}=c,I=i*s,T=Un(),v=y==="channelsLast",N=v?0:1,E=v?1:2;let D="";for(let F=0;F<=1;F++)for(let _=0;_<=1;_++)D+=` blockIndex = rc.y + ${_}; pos = rc.x + ${F}; @@ -2966,7 +2966,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(result); } - `}}function V8(e){const t=jo(["r","c","d"],e);return` + `}}function V8(e){const t=Xo(["r","c","d"],e);return` ivec3 inputCoordsFromReshapedOutCoords(int index) { ${t} return ivec3(r, c, d); @@ -3478,7 +3478,7 @@ return (round(mod(b, 2.0)) != 1) ? 
${o} coords = getOutputCoords(); setOutput(getX(${a})); } - `}}class i6{constructor(e){this.gpgpu=e,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0,this.freeTextures={},this.logEnabled=!1,this.usedTextures={}}acquireTexture(e,t,n){const s=RC(t,n),i=OC(e,s,n);i in this.freeTextures||(this.freeTextures[i]=[]),i in this.usedTextures||(this.usedTextures[i]=[]);const o=CC(e,s,this.gpgpu.gl,this.gpgpu.textureConfig,n);if(this.freeTextures[i].length>0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();const c=this.freeTextures[i].shift();return this.usedTextures[i].push(c),c}let a;return s===In.PACKED_2X2_FLOAT32?a=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===In.PACKED_2X2_FLOAT16?a=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===In.UNPACKED_FLOAT32?a=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===In.UNPACKED_FLOAT16?a=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===In.PACKED_4X1_UNSIGNED_BYTE&&(a=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[i].push(a),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),a}releaseTexture(e,t,n,s){if(this.freeTextures==null)return;const i=RC(n,s),o=OC(t,i,s);o in this.freeTextures||(this.freeTextures[o]=[]);const a=CC(t,i,this.gpgpu.gl,this.gpgpu.textureConfig,s),c=C().get("WEBGL_DELETE_TEXTURE_THRESHOLD");c!==-1&&this._numBytesAllocated>c?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=a):(this.freeTextures[o].push(e),this.numFreeTextures++,this._numBytesFree+=a),this.numUsedTextures--;const u=this.usedTextures[o],p=u.indexOf(e);if(p<0)throw new Error("Cannot release a texture that was never provided by this texture manager");u.splice(p,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(const e in this.freeTextures)this.freeTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});for(const e in this.usedTextures)this.usedTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function r6(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function CC(e,t,n,s,i){const o=o6(t,s);let a;if(i){const[u,p]=pc(e[0],e[1]);a=u*p}else{const[u,p]=cu(e[0],e[1]);a=u*p}const c=r6(n,o);return a*c}function o6(e,t){switch(e){case In.PACKED_2X2_FLOAT32:return IC(t);case In.PACKED_2X2_FLOAT16:return xC(t);case In.UNPACKED_FLOAT32:return wC(t);case In.UNPACKED_FLOAT16:return LC(t);case In.PACKED_4X1_UNSIGNED_BYTE:return SC(t);default:throw new Error(`Unknown physical texture type ${e}`)}}function a6(e){return C().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?In.PACKED_2X2_FLOAT32:In.UNPACKED_FLOAT32:e?In.PACKED_2X2_FLOAT16:In.UNPACKED_FLOAT16}function RC(e,t){if(e===Cs.UPLOAD)return 
In.PACKED_2X2_FLOAT32;if(e===Cs.RENDER||e==null)return a6(t);if(e===Cs.DOWNLOAD||e===Cs.PIXELS)return In.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function OC(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class c6{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();const c=this.freeTextures[i].shift();return this.usedTextures[i].push(c),c}let a;return s===In.PACKED_2X2_FLOAT32?a=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===In.PACKED_2X2_FLOAT16?a=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===In.UNPACKED_FLOAT32?a=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===In.UNPACKED_FLOAT16?a=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===In.PACKED_4X1_UNSIGNED_BYTE&&(a=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[i].push(a),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),a}releaseTexture(e,t,n,s){if(this.freeTextures==null)return;const i=RC(n,s),o=OC(t,i,s);o in this.freeTextures||(this.freeTextures[o]=[]);const a=CC(t,i,this.gpgpu.gl,this.gpgpu.textureConfig,s),c=C().get("WEBGL_DELETE_TEXTURE_THRESHOLD");c!==-1&&this._numBytesAllocated>c?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=a):(this.freeTextures[o].push(e),this.numFreeTextures++,this._numBytesFree+=a),this.numUsedTextures--;const u=this.usedTextures[o],p=u.indexOf(e);if(p<0)throw new Error("Cannot release a texture that was never provided by this texture manager");u.splice(p,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(const e in this.freeTextures)this.freeTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});for(const e in this.usedTextures)this.usedTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function r6(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function CC(e,t,n,s,i){const o=o6(t,s);let a;if(i){const[u,p]=fc(e[0],e[1]);a=u*p}else{const[u,p]=cu(e[0],e[1]);a=u*p}const c=r6(n,o);return a*c}function o6(e,t){switch(e){case In.PACKED_2X2_FLOAT32:return IC(t);case In.PACKED_2X2_FLOAT16:return xC(t);case In.UNPACKED_FLOAT32:return wC(t);case In.UNPACKED_FLOAT16:return LC(t);case In.PACKED_4X1_UNSIGNED_BYTE:return SC(t);default:throw new Error(`Unknown physical texture type ${e}`)}}function a6(e){return C().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?In.PACKED_2X2_FLOAT32:In.UNPACKED_FLOAT32:e?In.PACKED_2X2_FLOAT16:In.UNPACKED_FLOAT16}function RC(e,t){if(e===Cs.UPLOAD)return In.PACKED_2X2_FLOAT32;if(e===Cs.RENDER||e==null)return a6(t);if(e===Cs.DOWNLOAD||e===Cs.PIXELS)return In.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function 
OC(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class c6{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;ow.push(I))}const t=this.texData.get(e),{values:n,shape:s,slice:i,dtype:o,complexTensors:a,isPacked:c}=t;if(i!=null){let w;c?w=new pu(s,Im):w=new it(s,Im);const I=this.runWebGLProgram(w,[{dataId:e,shape:s,dtype:o}],o),T=this.read(I.dataId);return this.disposeIntermediateTensorInfo(I),T}if(n!=null)return this.convertAndCacheOnCPU(e);if(!C().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&C().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let u=null,p;if(o!=="complex64"&&C().get("WEBGL_BUFFER_SUPPORTED")){p=this.decode(e);const w=this.texData.get(p.dataId);u=this.gpgpu.createBufferFromTexture(w.texture,...lu(s))}this.pendingRead.set(e,[]),o!=="complex64"&&await this.gpgpu.createAndWaitForFence();let m;if(o==="complex64"){const w=await Promise.all([a.real.data(),a.imag.data()]),I=w[0],T=w[1];m=ir(I,T)}else if(u==null)m=this.getValuesFromTexture(e);else{const w=we(s);m=this.gpgpu.downloadFloat32MatrixFromBuffer(u,w)}p!=null&&this.disposeIntermediateTensorInfo(p);const y=this.convertAndCacheOnCPU(e,m),b=this.pendingRead.get(e);return this.pendingRead.delete(e),b.forEach(w=>w(y)),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e),this.pendingDeletes--),y}checkNumericalProblems(e){if(e==null)return;for(let t=0;tc.query)).filter(c=>c!=null),o=Ji(this.activeTimers.map(c=>c.name)).filter(c=>c!=null);this.activeTimers=t,s&&(this.programTimersStack=null);const a={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const c=await Promise.all(i);a.kernelMs=iT(c),a.getExtraProfileInfo=()=>c.map((u,p)=>({name:o[p],ms:u})).map(u=>`${u.name}: ${u.ms}`).join(", ")}else a.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,a}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:qn(),endMs:null}}endTimer(e){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=qn(),e)}async getQueryTime(e){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const t=e;return t.endMs-t.startMs}disposeData(e){if(this.pendingDisposal.has(e))return;if(this.pendingRead.has(e)){this.pendingDisposal.add(e),this.pendingDeletes++;return}if(!this.texData.has(e))return;this.releaseGPUData(e);const{complexTensors:t}=this.texData.get(e);t!=null&&(t.real.dispose(),t.imag.dispose()),this.texData.delete(e)}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:i,isPacked:o,slice:a}=this.texData.get(e),c=a&&a.origDataId||e,u=this.dataRefCount.get(c);u>1?this.dataRefCount.set(c,u-1):(this.dataRefCount.delete(c),t!=null&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,i,o)));const p=this.texData.get(e);p.texture=null,p.texShape=null,p.isPacked=!1,p.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return 
C().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=$s().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=q6){const n=this.getCPUBackend();return!this.warnedAboutCPUBackend&&n==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),n!=null&&e.every(s=>this.texData.get(s.dataId).texture==null&&we(s.shape)this.cpuBackend.stridedSlice(e,t,n,s));if(i)return i;const o=Wd(t,n,s);if(o.some(c=>c===0))return en([],o);const a=new s6(t,s,o);return this.compileAndRun(a,[e])}reverse(e,t){const n=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new J8(e.shape,t):new X8(e.shape,t);return this.compileAndRun(n,[e])}concat(e,t){if(e[0].dtype==="complex64"){const a=e.map(u=>Do(u)),c=e.map(u=>Pa(u));return Ci(this.concat(a,t),this.concat(c,t))}if(e.length===1)return e[0];if(e.length>C().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const a=Math.floor(e.length/2),c=this.concat(e.slice(0,a),t),u=this.concat(e.slice(a),t);return this.concat([c,u],t)}if(C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].rank>1){const a=new P5(e.map(c=>c.shape),t);return this.compileAndRun(a,e)}const n=Ur(e.map(a=>a.shape),t),s=e.map(a=>a.as2D(-1,we(a.shape.slice(t)))),i=new M5(s.map(a=>a.shape)),o=this.compileAndRun(i,s);return o.reshape(n)}neg(e){const t=this.tryRunOnCpuOrThrow([e],()=>this.cpuBackend.neg(e));if(t)return t;if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,_C,e.dtype);const n=new it(e.shape,_C);return this.compileAndRun(n,[e])}batchMatMul(e,t,n,s){const i=n?e.shape[2]:e.shape[1],o=s?t.shape[1]:t.shape[2],a=n?e.shape[1]:e.shape[2],[c,,]=e.shape;if((i===1||o===1)&&a>VC){n&&(e=Pe(e,[0,2,1])),s&&(t=Pe(t,[0,2,1]));const m=o===1?e:e.as3D(c,a,1),y=o===1?2:1,b=o===1?t.as3D(c,1,a):t;return this.multiply(m,b).sum(y,!0)}const u=Cn(e.dtype,t.dtype),p=new LS(e.shape,[c,i,o],n,s);return this.compileAndRun(p,[e,t],u)}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){const c=n?e.shape[2]:e.shape[1],u=s?t.shape[1]:t.shape[2],[p,,]=e.shape,m=Cn(e.dtype,t.dtype),y=i!=null,b=a!=null,w=o?Tm(o,!0):null,I=new LS(e.shape,[p,c,u],n,s,y,w,b),T=[e,t];return i&&T.push(i),a&&T.push(a),this.compileAndRun(I,T,m)}multiply(e,t){if(e.dtype==="complex64"){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),a=new oC(rC.REAL,e.shape,t.shape),c=new oC(rC.IMAG,e.shape,t.shape),u=[this.makeComplexComponentTensorInfo(e,i.complexTensors.real),this.makeComplexComponentTensorInfo(e,i.complexTensors.imag),this.makeComplexComponentTensorInfo(t,o.complexTensors.real),this.makeComplexComponentTensorInfo(t,o.complexTensors.imag)],p=this.compileAndRun(a,u),m=this.compileAndRun(c,u),y=this.complex(p,m);return p.dispose(),m.dispose(),y}const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=fK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,cC,e.dtype);const s=new hn(cC,e.shape,t.shape);return this.compileAndRun(s,[e,t],e.dtype)}localResponseNormalization4D(e,t,n,s,i){const o=C().getBool("WEBGL_PACK_NORMALIZATION")?new D8(e.shape,t,n,s,i):new O8(e.shape,t,n,s,i);return this.compileAndRun(o,[e])}LRNGrad(e,t,n,s,i,o,a){const c=new E8(t.shape,s,i,o,a);return 
this.compileAndRun(c,[t,n,e])}tile(e,t){if(e.dtype==="string"){const s=this.readSync(e.dataId),i=s.map(a=>Yl(a)),o=Qe(e.shape,e.dtype,i);return P6(o,t)}const n=new c6(e.shape,t);return this.compileAndRun(n,[e])}pad(e,t,n){const s=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new G8(e.shape,t,n):new z8(e.shape,t,n);return this.compileAndRun(s,[e])}gather(e,t,n){const s=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.gather(e,t,n));if(s)return s;const i=new r8(e.shape,t.size,n);return this.compileAndRun(i,[e,t])}batchToSpaceND(e,t,n){k(e.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((p,m)=>p*m),i=Rh(e.shape,t,s),o=Oh(i.length,t.length),a=Eh(e.shape,t,s),c=jb(n,t.length),u=Kb(a,n,t.length);return Pe(e.reshape(i),o).reshape(a).slice(c,u)}spaceToBatchND(e,t,n){k(e.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((m,y)=>m*y),i=[[0,0]];i.push(...n);for(let m=1+t.length;mthis.cpuBackend.prod(e,t));if(n)return n;const[s,i]=On(e.shape,t),o=we(i),a=e.as2D(-1,o),c=vd(e.dtype);return this.reduce(a,"prod",c).reshape(s)}unsortedSegmentSum(e,t,n){let s=0;const i=_n([s],e.rank);let o=e;i!=null&&(o=Pe(e,i),s=Is(1,e.rank)[0]);const a=GC.computeOutShape(o.shape,s,n),c=we([o.shape[s]]),u=o.as2D(-1,c),p=vd(e.dtype);let m=this.segOpCompute(u,"unsortedSegmentSum",t,p,n).reshape(a);return i!=null&&(m=Pe(m,eh(i))),m}segOpCompute(e,t,n,s,i){const o=e.shape[0],a=e.shape[1],c=GC.segOpComputeOptimalWindowSize(a,i),u={windowSize:c,inSize:a,batchSize:o,numSegments:i},p=new Z8(u,t),m=this.compileAndRun(p,[e,n],s);return m.shape[1]===i?m:(n=yh(0,i).tile([a/c]),this.segOpCompute(m,t,n,s,i))}argMinMaxReduce(e,t,n){const s=[t];if(ss("arg"+n.charAt(0).toUpperCase()+n.slice(1),s,e.rank),!C().getBool("WEBGL_PACK_REDUCE")||e.rank<=2){const[i,o]=On(e.shape,s),a=we(o),c=e.as2D(-1,a);return this.argReduce(c,n).reshape(i)}return this.argReducePacked(e,n)}argMin(e,t){return this.argMinMaxReduce(e,t,"min")}argMax(e,t){return this.argMinMaxReduce(e,t,"max")}cumsum(e,t,n,s){if(t!==e.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=e.shape[t];let o=e;for(let a=0;a<=Math.ceil(Math.log2(i))-1;a++){const c=new mC(e.shape,!1,s),u=c.getCustomSetupFunc(a),p=o;o=this.compileAndRun(c,[o],o.dtype,u),p.dispose()}if(n){const a=new mC(e.shape,n,s),c=o;o=this.compileAndRun(a,[o]),c.dispose()}return o}equal(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,v5,"bool");const n=new hn(u5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}notEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,N5,"bool");const n=new hn(d5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}less(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.less(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,C5,"bool");const s=new hn(p5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}lessEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,R5,"bool");const n=new hn(m5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}greater(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.greater(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,O5,"bool");const s=new hn(f5,e.shape,t.shape);return 
this.compileAndRun(s,[e,t],"bool")}greaterEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,E5,"bool");const n=new hn(g5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalNot(e){const t=new it(e.shape,_6);return this.compileAndRun(t,[e])}logicalAnd(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,D5,"bool");const n=new hn(y5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalOr(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,k5,"bool");const n=new hn(b5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}select(e,t,n){const s=new Q8(e.rank,t.shape,t.rank);return this.compileAndRun(s,[e,t,n],Cn(t.dtype,n.dtype))}where(e){Ja("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");const t=e.dataSync();return G6(e.shape,t)}topk(e,t,n){const s=e.dataSync();return z6(s,e.shape,e.dtype,t,n)}min(e,t){ss("min",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"min",o.dtype).reshape(n)}minimum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.minimum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(_5,e.shape,t.shape):new hn(L5,e.shape,t.shape);return this.compileAndRun(s,[e,t])}mod(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(W5,e.shape,t.shape):new hn(S5,e.shape,t.shape);return this.compileAndRun(n,[e,t])}maximum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.maximum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(F5,e.shape,t.shape):new hn(w5,e.shape,t.shape);return this.compileAndRun(s,[e,t])}all(e,t){ss("all",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"all",o.dtype).reshape(n)}any(e,t){ss("any",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"any",o.dtype).reshape(n)}floorDiv(e,t){const n=l5,s="int32";if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,x5,s);const i=new hn(n,e.shape,t.shape);return this.compileAndRun(i,[e,t],s)}add(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,bS);const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=cK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,bS,n);const s=new hn(bS,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}packedUnaryOp(e,t,n){const s=new pu(e.shape,t);return this.compileAndRun(s,[e],n)}packedBinaryOp(e,t,n,s,i=!1){const o=new to(n,e.shape,t.shape,i);return this.compileAndRun(o,[e,t],s)}complexSeparableBinaryOp(e,t,n){const s=this.texData.get(e.dataId),i=this.texData.get(t.dataId),[o,a]=[[s.complexTensors.real,i.complexTensors.real],[s.complexTensors.imag,i.complexTensors.imag]].map(u=>{const[p,m]=u,y=this.makeComplexComponentTensorInfo(e,p),b=this.makeComplexComponentTensorInfo(t,m),w=new hn(n,e.shape,t.shape);return this.compileAndRun(w,[y,b],Cn(p.dtype,m.dtype))}),c=this.complex(o,a);return o.dispose(),a.dispose(),c}makeComplexComponentTensorInfo(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}addN(e){if(e.length===1)return e[0];if(e.length>C().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const o=Math.floor(e.length/2),a=this.addN(e.slice(0,o)),c=this.addN(e.slice(o));return this.addN([a,c])}const 
t=e.map(o=>o.dtype).reduce((o,a)=>Cn(o,a)),n=e.map(o=>o.shape),s=C().getBool("WEBGL_PACK"),i=s?new SK(e[0].shape,n):new LK(e[0].shape,n);return this.compileAndRun(i,e,t)}subtract(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,wS);const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=bK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,wS,e.dtype);const s=new hn(wS,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}pow(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS"),s=n?new to(T5,e.shape,t.shape):new hn(h5,e.shape,t.shape),i=Cn(e.dtype,t.dtype);return this.compileAndRun(s,[e,t],i)}ceil(e){if(this.shouldExecuteOnCPU([e])){const n=lK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,WC,e.dtype);const t=new it(e.shape,WC);return this.compileAndRun(t,[e])}floor(e){if(this.shouldExecuteOnCPU([e])){const n=dK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,$C,e.dtype);const t=new it(e.shape,$C);return this.compileAndRun(t,[e])}sign(e){const t=new it(e.shape,p6);return this.compileAndRun(t,[e])}isNaN(e){const t=new it(e.shape,m6);return this.compileAndRun(t,[e],"bool")}isInf(e){const t=new it(e.shape,f6);return this.compileAndRun(t,[e],"bool")}isFinite(e){const t=new it(e.shape,g6);return this.compileAndRun(t,[e],"bool")}round(e){const t=new it(e.shape,y6);return this.compileAndRun(t,[e])}exp(e){if(this.shouldExecuteOnCPU([e])){const n=hK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,UC,e.dtype);const t=new it(e.shape,UC);return this.compileAndRun(t,[e])}expm1(e){if(this.shouldExecuteOnCPU([e])){const n=uK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,BC,e.dtype);const t=new it(e.shape,BC);return this.compileAndRun(t,[e])}softmax(e,t){const n=ft([t],e.shape),s=Xn(e,n),i=En(s.shape,n),o=this.subtract(e,s.reshape(i)),a=this.exp(o),c=this.sum(a,n).reshape(i);return _e(a,c)}log(e){if(this.shouldExecuteOnCPU([e])){const n=pK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,U6,e.dtype);const t=new it(e.shape,b6);return this.compileAndRun(t,[e])}log1p(e){const t=new it(e.shape,w6);return this.compileAndRun(t,[e])}sqrt(e){const t=new it(e.shape,L6);return this.compileAndRun(t,[e])}rsqrt(e){if(this.shouldExecuteOnCPU([e])){const n=gK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}const t=new it(e.shape,S6);return this.compileAndRun(t,[e])}reciprocal(e){const t=new it(e.shape,F6);return this.compileAndRun(t,[e])}relu(e){let t;return C().getBool("WEBGL_PACK")?t=new pu(e.shape,MC):t=new it(e.shape,DC),this.compileAndRun(t,[e])}relu6(e){let t;return C().getBool("WEBGL_PACK")?t=new pu(e.shape,PC):t=new it(e.shape,kC),this.compileAndRun(t,[e])}prelu(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(hC,e.shape,t.shape):new hn(lC,e.shape,t.shape);return 
this.compileAndRun(n,[e,t])}elu(e){if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,zC,e.dtype);const t=new it(e.shape,FC);return this.compileAndRun(t,[e])}eluDer(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(A5,e.shape,t.shape):new hn(I5,e.shape,t.shape);return this.compileAndRun(n,[e,t])}selu(e){const t=new it(e.shape,u6);return this.compileAndRun(t,[e])}int(e){const t=new it(e.shape,W6);return this.compileAndRun(t,[e],"int32")}clip(e,t,n){let s;C().getBool("WEBGL_PACK_CLIP")?s=new U5(e.shape):s=new $5(e.shape);const i=s.getCustomSetupFunc(t,n);return this.compileAndRun(s,[e],null,i)}abs(e){if(this.shouldExecuteOnCPU([e])&&e.dtype!=="complex64"){const n=aK(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,EC,e.dtype);const t=new it(e.shape,EC);return this.compileAndRun(t,[e])}complexAbs(e){const t=this.texData.get(e.dataId),n=new B5(e.shape),s=[this.makeComplexComponentTensorInfo(e,t.complexTensors.real),this.makeComplexComponentTensorInfo(e,t.complexTensors.imag)];return this.compileAndRun(n,s)}sigmoid(e){const t=new it(e.shape,I6);return this.compileAndRun(t,[e])}softplus(e){const t=new it(e.shape,x6);return this.compileAndRun(t,[e])}asin(e){const t=new it(e.shape,T6);return this.compileAndRun(t,[e])}acos(e){const t=new it(e.shape,A6);return this.compileAndRun(t,[e])}atan(e){const t=new it(e.shape,v6);return this.compileAndRun(t,[e])}sinh(e){const t=new it(e.shape,N6);return this.compileAndRun(t,[e])}cosh(e){const t=new it(e.shape,C6);return this.compileAndRun(t,[e])}tanh(e){const t=new it(e.shape,R6);return this.compileAndRun(t,[e])}asinh(e){const t=new it(e.shape,O6);return this.compileAndRun(t,[e])}acosh(e){const t=new it(e.shape,E6);return this.compileAndRun(t,[e])}atanh(e){const t=new it(e.shape,D6);return this.compileAndRun(t,[e])}erf(e){const t=new it(e.shape,k6);return this.compileAndRun(t,[e])}step(e,t){const n=new it(e.shape,d6(t));return this.compileAndRun(n,[e])}conv2dByMatMul(e,t,n,s,i,o){const a=e.shape,c=this.texData.get(e.dataId),u=n.inChannels,p=a[0]*a[1]*a[2],m=n.outChannels,y=n.dataFormat==="channelsLast",b=!1,w=!1,I=(p===1||m===1)&&u>VC,T=a[2]%2!==0&&!!c.isPacked;if(I||!C().getBool("WEBGL_LAZILY_UNPACK")||!C().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!T){const B=y?a[0]*a[1]*a[2]:a[0]*a[2]*a[3],U=K(e,[1,B,n.inChannels]),Y=K(t,[1,n.inChannels,n.outChannels]),q=this.fusedBatchMatMul({a:U,b:Y,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o});return K(q,n.outShape)}const v=y?a[0]*a[1]*(a[2]+1):a[0]*a[2]*(a[3]+1),N={dataId:e.dataId,shape:[1,v,n.inChannels],dtype:e.dtype},E=c.shape;c.shape=c.shape.slice(),c.shape[c.shape.length-2]++,k(gm(c.shape,N.shape),()=>`packed reshape ${c.shape} to ${N.shape} isn't free`);const D=K(t,[1,n.inChannels,n.outChannels]),F=this.fusedBatchMatMul({a:N,b:D,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o}),_=this.texData.get(F.dataId);return k(_.isPacked,()=>"batchMatMul result is expected to be packed"),c.shape=E,_.shape=n.outShape,$s().makeTensorFromDataId(F.dataId,n.outShape,F.dtype)}conv2dWithIm2Row(e,t,n,s,i,o){const{filterWidth:a,filterHeight:c,inChannels:u,outWidth:p,outHeight:m,dataFormat:y}=n,b=y==="channelsLast",w=a*c*u,I=m*p,T=[w,I],v=!0,N=!1,E=e.squeeze([0]),D=t.reshape([1,w,-1]),F=new R8(T,E.shape,n),_=this.compileAndRun(F,[E]).reshape([1,T[0],T[1]]),B=s!=null,U=o!=null,Y=i?Tm(i,!0):null,q=new 
LS(_.shape,[1,I,n.outChannels],v,N,B,Y,U),J=[_,D];s&&J.push(s),U&&J.push(o);const oe=this.compileAndRun(q,J);return b?oe.reshape([1,m,p,n.outChannels]):oe.reshape([1,n.outChannels,m,p])}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n,s,i,o);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n,s,i,o);const a=s!=null,c=o!=null,u=i?Tm(i,!1):null,p=new uC(n,a,u,c),m=[e,t];return s&&m.push(s),o&&m.push(o),this.compileAndRun(p,m)}conv2d(e,t,n){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n);const s=new uC(n);return this.compileAndRun(s,[e,t])}conv2dDerInput(e,t,n){const s=new G5(n);return this.compileAndRun(s,[e,t])}conv2dDerFilter(e,t,n){const s=new z5(n);return this.compileAndRun(s,[e,t])}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){const a=C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1,c=i?Tm(i,a):null,u=[e,t],p=s!=null,m=o!=null;p&&u.push(s),m&&u.push(o);let y;return a?(y=new pC(n,p,c,m),this.compileAndRun(y,u)):(y=new dC(n,p,c,m),this.compileAndRun(y,u))}depthwiseConv2D(e,t,n){let s;return C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1?(s=new pC(n),this.compileAndRun(s,[e,t])):(s=new dC(n),this.compileAndRun(s,[e,t]))}depthwiseConv2DDerInput(e,t,n){const s=new q5(n);return this.compileAndRun(s,[e,t])}depthwiseConv2DDerFilter(e,t,n){const s=new Y5(n);return this.compileAndRun(s,[e,t])}conv3d(e,t,n){const s=new j5(n);return this.compileAndRun(s,[e,t])}conv3dDerInput(e,t,n){const s=new H5(n);return this.compileAndRun(s,[e,t])}conv3dDerFilter(e,t,n){const s=new V5(n);return this.compileAndRun(s,[e,t])}cast(e,t){return ov(e,t,this)}unstack(e,t){const n=e.shape[t],s=new Array(e.rank-1);let i=0;for(let u=0;u1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=n==="NHWC"?e.shape[1]:e.shape[2],o=n==="NHWC"?e.shape[2]:e.shape[3],a=n==="NHWC"?e.shape[3]:e.shape[1],c=i*t,u=o*t,p=a/(t*t),m=n==="NHWC"?[s,c,u,p]:[s,p,c,u],y=new Z5(m,t,n);return this.compileAndRun(y,[e])}split(e,t,n){return M6(e,t,n)}scatterND(e,t,n){const{sliceRank:s,numUpdates:i,sliceSize:o,strides:a,outputSize:c}=Wa(t,e,n),u=[c/o,o],p=e.reshape([i,s]),m=t.reshape([i,o]);if(c===0)return av(en([]),n);const y=Ne(0),b=new NC(i,s,p.rank,m.rank,a,u),w=this.compileAndRun(b,[m,p,y]);return w.reshape(n)}sparseToDense(e,t,n,s){const{sliceRank:i,numUpdates:o,strides:a,outputSize:c}=Wa(t,e,n),u=!1,p=new NC(o,i,e.rank,t.rank,a,[c,1],u),m=this.compileAndRun(p,[t,e,s]);return m.reshape(n)}fft(e){const t=!1;return this.fftImpl(e,t)}ifft(e){const t=!0;return this.fftImpl(e,t)}fftImpl(e,t){const n=this.texData.get(e.dataId),s=new bC(yC.REAL,e.shape,t),i=new bC(yC.IMAG,e.shape,t),o=[this.makeComplexComponentTensorInfo(e,n.complexTensors.real),this.makeComplexComponentTensorInfo(e,n.complexTensors.imag)],a=this.compileAndRun(s,o),c=this.compileAndRun(i,o),u=this.complex(a,c).as2D(e.shape[0],e.shape[1]);return a.dispose(),c.dispose(),u}gatherND(e,t){const 
n=t.shape,s=n[n.length-1],[i,o,a,c]=Fd(e,t),u=t.reshape([o,s]),p=e.reshape([e.size/a,a]),m=new a8(s,c,[o,a]),y=this.compileAndRun(m,[p,u]);return y.reshape(i)}fill(e,t,n){if(n=n||Ca(t),n==="string"){const s=wo(n,we(e));return s.fill(t),$s().makeTensor(s,e,n,this)}else{const s=new i8(e,t),i=s.getCustomSetupFunc(t);return this.compileAndRun(s,[],n,i)}}onesLike(e){if(e.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(e.shape,1,e.dtype)}zerosLike(e){return this.fill(e.shape,e.dtype==="string"?"":0,e.dtype)}linspace(e,t,n){return sw(e,t,n)}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return $s().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new B6(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new $8(e.shape),n=!0;return this.runWebGLProgram(t,[e],e.dtype,null,n)}packedReshape(e,t){const n=[mc(e.shape),...fc(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},i=[mc(t),...fc(t)],o=new vC(i,n),a=!0,c=this.runWebGLProgram(o,[s],e.dtype,null,a);return{dataId:c.dataId,shape:t,dtype:c.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:i}=t,o=fS(s);let a;n?a=new J5(o):a=new X5(o);const c=!0,u=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:e}],i,null,c);return{dtype:i,shape:s,dataId:u.dataId}}runWebGLProgram(e,t,n,s,i=!1){const o=this.makeTensorInfo(e.outputShape,n),a=this.texData.get(o.dataId);if(e.packedOutput&&(a.isPacked=!0),e.outPackingScheme===au.DENSE){const I=lu(e.outputShape);a.texShape=I.map(T=>T*2)}if(e.outTexUsage!=null&&(a.usage=e.outTexUsage),we(o.shape)===0)return a.values=wn(o.dtype,0),o;const c=[],u=t.map(I=>{if(I.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let T=this.texData.get(I.dataId);if(T.texture==null){if(!e.packedInputs&&we(I.shape)<=C().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:I.shape,texData:null,isUniform:!0,uniformValues:T.values};e.packedInputs&&(T.isPacked=!0,T.shape=I.shape)}else if(!!T.isPacked!==!!e.packedInputs)I=T.isPacked?this.unpackTensor(I):this.packTensor(I),c.push(I),T=this.texData.get(I.dataId);else if(T.isPacked&&!gm(T.shape,I.shape)){const v=I,N=I.shape;I.shape=T.shape,I=this.packedReshape(I,N),c.push(I),T=this.texData.get(I.dataId),v.shape=N}return this.uploadToGPU(I.dataId),{shape:I.shape,texData:T,isUniform:!1}});this.uploadToGPU(o.dataId);const p={shape:o.shape,texData:a,isUniform:!1},m=C8(e,u,p),y=this.getAndSaveBinary(m,()=>v8(this.gpgpu,e,u,p)),b=this.activeTimers!=null;let w;if(b&&(w=this.startTimer()),N8(this.gpgpu,y,u,p,s),c.forEach(I=>this.disposeIntermediateTensorInfo(I)),b&&(w=this.endTimer(w),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(w)})),!C().getBool("WEBGL_LAZILY_UNPACK")&&a.isPacked&&i===!1){const I=this.unpackTensor(o);return this.disposeIntermediateTensorInfo(o),I}return o}compileAndRun(e,t,n,s,i=!1){n=n||t[0].dtype;const o=this.runWebGLProgram(e,t,n,s,i);return $s().makeTensorFromDataId(o.dataId,o.shape,o.dtype)}getAndSaveBinary(e,t){return e in this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!C().getBool("IS_TEST")){const e=Object.keys(this.binaryCache);e.forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=ee(()=>{if(!C().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=C().getBool("DEBUG");C().set("DEBUG",!1);const t=this.abs(Ne(1e-8)).dataSync()[0];if(C().set("DEBUG",e),t>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?V6:H6}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:i,texture:o,usage:a,isPacked:c}=t;if(o!=null)return;const u=this.activeTimers!=null;let p;u&&(p=qn());let m=t.texShape;if(m==null&&(m=Qj(n,c),t.texShape=m),i!=null){const y=fS(n);let b,w=m[1],I=m[0];const T=i instanceof Uint8Array;c?([w,I]=pc(m[0],m[1]),b=new s8(y,[I,w],T)):b=new n8(y,[I,w],T);const v=this.makeTensorInfo([I,w],s);T?this.texData.get(v.dataId).usage=Cs.PIXELS:this.texData.get(v.dataId).usage=Cs.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(v.dataId),w,I,i);const N=!0,E=this.runWebGLProgram(b,[v],s,null,N),D=this.texData.get(E.dataId);t.texture=D.texture,t.texShape=D.texShape,t.isPacked=D.isPacked,t.usage=D.usage,this.disposeIntermediateTensorInfo(v),this.texData.delete(E.dataId),t.values=null,u&&(this.uploadWaitMs+=qn()-p)}else{const y=this.acquireTexture(m,a,s,c);t.texture=y}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),t!=null&&(n.values=J6(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){const 
i=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${i} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*Ty(t)}tryRunOnCpuOrThrow(e,t){if(this.shouldExecuteOnCPU(e))try{return t()}catch(n){if(C().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function J6(e,t){if(t==="float32"||t==="complex64")return e;if(t==="int32"||t==="bool"){const n=t==="int32"?new Int32Array(e.length):new Uint8Array(e.length);for(let s=0;snew X6,2);const dte={forceHalfFloat:Q6},HC="if (isnan(x)) return x;",e7=` + `}}const{segment_util:GC}=iw,M6=rw,P6=ow,z6=aw,G6=dp,V6=1e-7,H6=1e-4,xm={};function Y6(e){return e in xm||(xm[e]={}),xm[e]}function Tm(e,t=!1){if(e==="linear")return t?$6:h6;if(e==="relu")return t?MC:DC;if(e==="elu")return t?zC:FC;if(e==="relu6")return t?PC:kC;if(e==="prelu")return t?hC:lC;throw new Error(`Activation ${e} has not been implemented for the WebGL backend.`)}const q6=128,j6=600;function K6(){return C().global.screen==null?1024:C().global.screen.height*C().global.screen.width*window.devicePixelRatio*j6/1024/1024}const VC=1e3;class X6 extends f{constructor(e){super();if(this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!C().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(e==null){const t=Mi(C().getNumber("WEBGL_VERSION"));this.binaryCache=Y6(C().getNumber("WEBGL_VERSION")),this.gpgpu=new T8(t),this.canvas=t.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=e,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=e.gl.canvas;this.textureManager=new i6(this.gpgpu),this.numMBBeforeWarning=K6(),this.texData=new d(this,$s())}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(e,t,n){if((C().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||C().getBool("DEBUG"))&&this.checkNumericalProblems(e),n==="complex64"&&e!=null)throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");const s={};return this.texData.set(s,{shape:t,dtype:n,values:e,usage:Cs.UPLOAD,refCount:1}),s}incRef(e){const t=this.texData.get(e);t.refCount++}decRef(e){if(this.texData.has(e)){const t=this.texData.get(e);t.refCount--}}move(e,t,n,s){if(C().getBool("DEBUG")&&this.checkNumericalProblems(t),s==="complex64")throw new Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");this.texData.set(e,{shape:n,dtype:s,values:t,usage:Cs.UPLOAD,refCount:1})}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.texData.has(t)){const n=this.texData.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}readSync(e){const t=this.texData.get(e),{values:n,dtype:s,complexTensors:i,slice:o,shape:a,isPacked:c}=t;if(o!=null){let y;c?y=new pu(a,Im):y=new it(a,Im);const b=this.runWebGLProgram(y,[{dataId:e,shape:a,dtype:s}],s),w=this.readSync(b.dataId);return this.disposeIntermediateTensorInfo(b),w}if(n!=null)return this.convertAndCacheOnCPU(e);if(s==="string")return n;const u=this.activeTimers!=null;let p;u&&(p=qn());let m;if(s==="complex64"){const y=i.real.dataSync(),b=i.imag.dataSync();m=ir(y,b)}else m=this.getValuesFromTexture(e);return u&&(this.downloadWaitMs+=qn()-p),this.convertAndCacheOnCPU(e,m)}async read(e){if(this.pendingRead.has(e)){const w=this.pendingRead.get(e);return new Promise(I=>w.push(I))}const t=this.texData.get(e),{values:n,shape:s,slice:i,dtype:o,complexTensors:a,isPacked:c}=t;if(i!=null){let w;c?w=new pu(s,Im):w=new it(s,Im);const I=this.runWebGLProgram(w,[{dataId:e,shape:s,dtype:o}],o),T=this.read(I.dataId);return this.disposeIntermediateTensorInfo(I),T}if(n!=null)return this.convertAndCacheOnCPU(e);if(!C().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&C().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let u=null,p;if(o!=="complex64"&&C().get("WEBGL_BUFFER_SUPPORTED")){p=this.decode(e);const w=this.texData.get(p.dataId);u=this.gpgpu.createBufferFromTexture(w.texture,...lu(s))}this.pendingRead.set(e,[]),o!=="complex64"&&await this.gpgpu.createAndWaitForFence();let m;if(o==="complex64"){const w=await Promise.all([a.real.data(),a.imag.data()]),I=w[0],T=w[1];m=ir(I,T)}else if(u==null)m=this.getValuesFromTexture(e);else{const w=we(s);m=this.gpgpu.downloadFloat32MatrixFromBuffer(u,w)}p!=null&&this.disposeIntermediateTensorInfo(p);const y=this.convertAndCacheOnCPU(e,m),b=this.pendingRead.get(e);return this.pendingRead.delete(e),b.forEach(w=>w(y)),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e),this.pendingDeletes--),y}checkNumericalProblems(e){if(e==null)return;for(let t=0;tc.query)).filter(c=>c!=null),o=Ji(this.activeTimers.map(c=>c.name)).filter(c=>c!=null);this.activeTimers=t,s&&(this.programTimersStack=null);const a={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const c=await Promise.all(i);a.kernelMs=iT(c),a.getExtraProfileInfo=()=>c.map((u,p)=>({name:o[p],ms:u})).map(u=>`${u.name}: ${u.ms}`).join(", ")}else a.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,a}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:qn(),endMs:null}}endTimer(e){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=qn(),e)}async getQueryTime(e){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const t=e;return 
t.endMs-t.startMs}disposeData(e){if(this.pendingDisposal.has(e))return;if(this.pendingRead.has(e)){this.pendingDisposal.add(e),this.pendingDeletes++;return}if(!this.texData.has(e))return;this.releaseGPUData(e);const{complexTensors:t}=this.texData.get(e);t!=null&&(t.real.dispose(),t.imag.dispose()),this.texData.delete(e)}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:i,isPacked:o,slice:a}=this.texData.get(e),c=a&&a.origDataId||e,u=this.dataRefCount.get(c);u>1?this.dataRefCount.set(c,u-1):(this.dataRefCount.delete(c),t!=null&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,i,o)));const p=this.texData.get(e);p.texture=null,p.texShape=null,p.isPacked=!1,p.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return C().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=$s().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=q6){const n=this.getCPUBackend();return!this.warnedAboutCPUBackend&&n==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),n!=null&&e.every(s=>this.texData.get(s.dataId).texture==null&&we(s.shape)this.cpuBackend.stridedSlice(e,t,n,s));if(i)return i;const o=Wd(t,n,s);if(o.some(c=>c===0))return en([],o);const a=new s6(t,s,o);return this.compileAndRun(a,[e])}reverse(e,t){const n=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new J8(e.shape,t):new X8(e.shape,t);return this.compileAndRun(n,[e])}concat(e,t){if(e[0].dtype==="complex64"){const a=e.map(u=>Fo(u)),c=e.map(u=>Ga(u));return Ci(this.concat(a,t),this.concat(c,t))}if(e.length===1)return e[0];if(e.length>C().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const a=Math.floor(e.length/2),c=this.concat(e.slice(0,a),t),u=this.concat(e.slice(a),t);return this.concat([c,u],t)}if(C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].rank>1){const a=new P5(e.map(c=>c.shape),t);return this.compileAndRun(a,e)}const n=Ur(e.map(a=>a.shape),t),s=e.map(a=>a.as2D(-1,we(a.shape.slice(t)))),i=new M5(s.map(a=>a.shape)),o=this.compileAndRun(i,s);return o.reshape(n)}neg(e){const t=this.tryRunOnCpuOrThrow([e],()=>this.cpuBackend.neg(e));if(t)return t;if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,_C,e.dtype);const n=new it(e.shape,_C);return this.compileAndRun(n,[e])}batchMatMul(e,t,n,s){const i=n?e.shape[2]:e.shape[1],o=s?t.shape[1]:t.shape[2],a=n?e.shape[1]:e.shape[2],[c,,]=e.shape;if((i===1||o===1)&&a>VC){n&&(e=Pe(e,[0,2,1])),s&&(t=Pe(t,[0,2,1]));const m=o===1?e:e.as3D(c,a,1),y=o===1?2:1,b=o===1?t.as3D(c,1,a):t;return this.multiply(m,b).sum(y,!0)}const u=Cn(e.dtype,t.dtype),p=new LS(e.shape,[c,i,o],n,s);return this.compileAndRun(p,[e,t],u)}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){const c=n?e.shape[2]:e.shape[1],u=s?t.shape[1]:t.shape[2],[p,,]=e.shape,m=Cn(e.dtype,t.dtype),y=i!=null,b=a!=null,w=o?Tm(o,!0):null,I=new LS(e.shape,[p,c,u],n,s,y,w,b),T=[e,t];return i&&T.push(i),a&&T.push(a),this.compileAndRun(I,T,m)}multiply(e,t){if(e.dtype==="complex64"){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),a=new oC(rC.REAL,e.shape,t.shape),c=new 
oC(rC.IMAG,e.shape,t.shape),u=[this.makeComplexComponentTensorInfo(e,i.complexTensors.real),this.makeComplexComponentTensorInfo(e,i.complexTensors.imag),this.makeComplexComponentTensorInfo(t,o.complexTensors.real),this.makeComplexComponentTensorInfo(t,o.complexTensors.imag)],p=this.compileAndRun(a,u),m=this.compileAndRun(c,u),y=this.complex(p,m);return p.dispose(),m.dispose(),y}const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=fK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,cC,e.dtype);const s=new un(cC,e.shape,t.shape);return this.compileAndRun(s,[e,t],e.dtype)}localResponseNormalization4D(e,t,n,s,i){const o=C().getBool("WEBGL_PACK_NORMALIZATION")?new D8(e.shape,t,n,s,i):new O8(e.shape,t,n,s,i);return this.compileAndRun(o,[e])}LRNGrad(e,t,n,s,i,o,a){const c=new E8(t.shape,s,i,o,a);return this.compileAndRun(c,[t,n,e])}tile(e,t){if(e.dtype==="string"){const s=this.readSync(e.dataId),i=s.map(a=>Yl(a)),o=Qe(e.shape,e.dtype,i);return P6(o,t)}const n=new c6(e.shape,t);return this.compileAndRun(n,[e])}pad(e,t,n){const s=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new G8(e.shape,t,n):new z8(e.shape,t,n);return this.compileAndRun(s,[e])}gather(e,t,n){const s=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.gather(e,t,n));if(s)return s;const i=new r8(e.shape,t.size,n);return this.compileAndRun(i,[e,t])}batchToSpaceND(e,t,n){k(e.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((p,m)=>p*m),i=Rh(e.shape,t,s),o=Oh(i.length,t.length),a=Eh(e.shape,t,s),c=jb(n,t.length),u=Kb(a,n,t.length);return Pe(e.reshape(i),o).reshape(a).slice(c,u)}spaceToBatchND(e,t,n){k(e.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((m,y)=>m*y),i=[[0,0]];i.push(...n);for(let m=1+t.length;mthis.cpuBackend.prod(e,t));if(n)return n;const[s,i]=On(e.shape,t),o=we(i),a=e.as2D(-1,o),c=vd(e.dtype);return this.reduce(a,"prod",c).reshape(s)}unsortedSegmentSum(e,t,n){let s=0;const i=_n([s],e.rank);let o=e;i!=null&&(o=Pe(e,i),s=Is(1,e.rank)[0]);const a=GC.computeOutShape(o.shape,s,n),c=we([o.shape[s]]),u=o.as2D(-1,c),p=vd(e.dtype);let m=this.segOpCompute(u,"unsortedSegmentSum",t,p,n).reshape(a);return i!=null&&(m=Pe(m,eh(i))),m}segOpCompute(e,t,n,s,i){const o=e.shape[0],a=e.shape[1],c=GC.segOpComputeOptimalWindowSize(a,i),u={windowSize:c,inSize:a,batchSize:o,numSegments:i},p=new Z8(u,t),m=this.compileAndRun(p,[e,n],s);return m.shape[1]===i?m:(n=yh(0,i).tile([a/c]),this.segOpCompute(m,t,n,s,i))}argMinMaxReduce(e,t,n){const s=[t];if(ss("arg"+n.charAt(0).toUpperCase()+n.slice(1),s,e.rank),!C().getBool("WEBGL_PACK_REDUCE")||e.rank<=2){const[i,o]=On(e.shape,s),a=we(o),c=e.as2D(-1,a);return this.argReduce(c,n).reshape(i)}return this.argReducePacked(e,n)}argMin(e,t){return this.argMinMaxReduce(e,t,"min")}argMax(e,t){return this.argMinMaxReduce(e,t,"max")}cumsum(e,t,n,s){if(t!==e.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=e.shape[t];let o=e;for(let a=0;a<=Math.ceil(Math.log2(i))-1;a++){const c=new mC(e.shape,!1,s),u=c.getCustomSetupFunc(a),p=o;o=this.compileAndRun(c,[o],o.dtype,u),p.dispose()}if(n){const a=new mC(e.shape,n,s),c=o;o=this.compileAndRun(a,[o]),c.dispose()}return o}equal(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,v5,"bool");const n=new 
un(u5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}notEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,N5,"bool");const n=new un(d5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}less(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.less(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,C5,"bool");const s=new un(p5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}lessEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,R5,"bool");const n=new un(m5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}greater(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.greater(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,O5,"bool");const s=new un(f5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}greaterEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,E5,"bool");const n=new un(g5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalNot(e){const t=new it(e.shape,_6);return this.compileAndRun(t,[e])}logicalAnd(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,D5,"bool");const n=new un(y5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalOr(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,k5,"bool");const n=new un(b5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}select(e,t,n){const s=new Q8(e.rank,t.shape,t.rank);return this.compileAndRun(s,[e,t,n],Cn(t.dtype,n.dtype))}where(e){Qa("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");const t=e.dataSync();return G6(e.shape,t)}topk(e,t,n){const s=e.dataSync();return z6(s,e.shape,e.dtype,t,n)}min(e,t){ss("min",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"min",o.dtype).reshape(n)}minimum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.minimum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(_5,e.shape,t.shape):new un(L5,e.shape,t.shape);return this.compileAndRun(s,[e,t])}mod(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(W5,e.shape,t.shape):new un(S5,e.shape,t.shape);return this.compileAndRun(n,[e,t])}maximum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.maximum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(F5,e.shape,t.shape):new un(w5,e.shape,t.shape);return this.compileAndRun(s,[e,t])}all(e,t){ss("all",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"all",o.dtype).reshape(n)}any(e,t){ss("any",t,e.rank);const[n,s]=On(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"any",o.dtype).reshape(n)}floorDiv(e,t){const n=l5,s="int32";if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,x5,s);const i=new un(n,e.shape,t.shape);return this.compileAndRun(i,[e,t],s)}add(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,bS);const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=cK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,bS,n);const s=new un(bS,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}packedUnaryOp(e,t,n){const 
s=new pu(e.shape,t);return this.compileAndRun(s,[e],n)}packedBinaryOp(e,t,n,s,i=!1){const o=new to(n,e.shape,t.shape,i);return this.compileAndRun(o,[e,t],s)}complexSeparableBinaryOp(e,t,n){const s=this.texData.get(e.dataId),i=this.texData.get(t.dataId),[o,a]=[[s.complexTensors.real,i.complexTensors.real],[s.complexTensors.imag,i.complexTensors.imag]].map(u=>{const[p,m]=u,y=this.makeComplexComponentTensorInfo(e,p),b=this.makeComplexComponentTensorInfo(t,m),w=new un(n,e.shape,t.shape);return this.compileAndRun(w,[y,b],Cn(p.dtype,m.dtype))}),c=this.complex(o,a);return o.dispose(),a.dispose(),c}makeComplexComponentTensorInfo(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}addN(e){if(e.length===1)return e[0];if(e.length>C().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const o=Math.floor(e.length/2),a=this.addN(e.slice(0,o)),c=this.addN(e.slice(o));return this.addN([a,c])}const t=e.map(o=>o.dtype).reduce((o,a)=>Cn(o,a)),n=e.map(o=>o.shape),s=C().getBool("WEBGL_PACK"),i=s?new SK(e[0].shape,n):new LK(e[0].shape,n);return this.compileAndRun(i,e,t)}subtract(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,wS);const n=Cn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=bK(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,wS,e.dtype);const s=new un(wS,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}pow(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS"),s=n?new to(T5,e.shape,t.shape):new un(h5,e.shape,t.shape),i=Cn(e.dtype,t.dtype);return this.compileAndRun(s,[e,t],i)}ceil(e){if(this.shouldExecuteOnCPU([e])){const n=lK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,WC,e.dtype);const t=new it(e.shape,WC);return this.compileAndRun(t,[e])}floor(e){if(this.shouldExecuteOnCPU([e])){const n=dK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,$C,e.dtype);const t=new it(e.shape,$C);return this.compileAndRun(t,[e])}sign(e){const t=new it(e.shape,p6);return this.compileAndRun(t,[e])}isNaN(e){const t=new it(e.shape,m6);return this.compileAndRun(t,[e],"bool")}isInf(e){const t=new it(e.shape,f6);return this.compileAndRun(t,[e],"bool")}isFinite(e){const t=new it(e.shape,g6);return this.compileAndRun(t,[e],"bool")}round(e){const t=new it(e.shape,y6);return this.compileAndRun(t,[e])}exp(e){if(this.shouldExecuteOnCPU([e])){const n=hK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,UC,e.dtype);const t=new it(e.shape,UC);return this.compileAndRun(t,[e])}expm1(e){if(this.shouldExecuteOnCPU([e])){const n=uK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,BC,e.dtype);const t=new it(e.shape,BC);return this.compileAndRun(t,[e])}softmax(e,t){const n=ft([t],e.shape),s=Xn(e,n),i=En(s.shape,n),o=this.subtract(e,s.reshape(i)),a=this.exp(o),c=this.sum(a,n).reshape(i);return _e(a,c)}log(e){if(this.shouldExecuteOnCPU([e])){const n=pK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return 
this.packedUnaryOp(e,U6,e.dtype);const t=new it(e.shape,b6);return this.compileAndRun(t,[e])}log1p(e){const t=new it(e.shape,w6);return this.compileAndRun(t,[e])}sqrt(e){const t=new it(e.shape,L6);return this.compileAndRun(t,[e])}rsqrt(e){if(this.shouldExecuteOnCPU([e])){const n=gK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}const t=new it(e.shape,S6);return this.compileAndRun(t,[e])}reciprocal(e){const t=new it(e.shape,F6);return this.compileAndRun(t,[e])}relu(e){let t;return C().getBool("WEBGL_PACK")?t=new pu(e.shape,MC):t=new it(e.shape,DC),this.compileAndRun(t,[e])}relu6(e){let t;return C().getBool("WEBGL_PACK")?t=new pu(e.shape,PC):t=new it(e.shape,kC),this.compileAndRun(t,[e])}prelu(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(hC,e.shape,t.shape):new un(lC,e.shape,t.shape);return this.compileAndRun(n,[e,t])}elu(e){if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,zC,e.dtype);const t=new it(e.shape,FC);return this.compileAndRun(t,[e])}eluDer(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(A5,e.shape,t.shape):new un(I5,e.shape,t.shape);return this.compileAndRun(n,[e,t])}selu(e){const t=new it(e.shape,u6);return this.compileAndRun(t,[e])}int(e){const t=new it(e.shape,W6);return this.compileAndRun(t,[e],"int32")}clip(e,t,n){let s;C().getBool("WEBGL_PACK_CLIP")?s=new U5(e.shape):s=new $5(e.shape);const i=s.getCustomSetupFunc(t,n);return this.compileAndRun(s,[e],null,i)}abs(e){if(this.shouldExecuteOnCPU([e])&&e.dtype!=="complex64"){const n=aK(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,EC,e.dtype);const t=new it(e.shape,EC);return this.compileAndRun(t,[e])}complexAbs(e){const t=this.texData.get(e.dataId),n=new B5(e.shape),s=[this.makeComplexComponentTensorInfo(e,t.complexTensors.real),this.makeComplexComponentTensorInfo(e,t.complexTensors.imag)];return this.compileAndRun(n,s)}sigmoid(e){const t=new it(e.shape,I6);return this.compileAndRun(t,[e])}softplus(e){const t=new it(e.shape,x6);return this.compileAndRun(t,[e])}asin(e){const t=new it(e.shape,T6);return this.compileAndRun(t,[e])}acos(e){const t=new it(e.shape,A6);return this.compileAndRun(t,[e])}atan(e){const t=new it(e.shape,v6);return this.compileAndRun(t,[e])}sinh(e){const t=new it(e.shape,N6);return this.compileAndRun(t,[e])}cosh(e){const t=new it(e.shape,C6);return this.compileAndRun(t,[e])}tanh(e){const t=new it(e.shape,R6);return this.compileAndRun(t,[e])}asinh(e){const t=new it(e.shape,O6);return this.compileAndRun(t,[e])}acosh(e){const t=new it(e.shape,E6);return this.compileAndRun(t,[e])}atanh(e){const t=new it(e.shape,D6);return this.compileAndRun(t,[e])}erf(e){const t=new it(e.shape,k6);return this.compileAndRun(t,[e])}step(e,t){const n=new it(e.shape,d6(t));return this.compileAndRun(n,[e])}conv2dByMatMul(e,t,n,s,i,o){const a=e.shape,c=this.texData.get(e.dataId),u=n.inChannels,p=a[0]*a[1]*a[2],m=n.outChannels,y=n.dataFormat==="channelsLast",b=!1,w=!1,I=(p===1||m===1)&&u>VC,T=a[2]%2!==0&&!!c.isPacked;if(I||!C().getBool("WEBGL_LAZILY_UNPACK")||!C().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!T){const B=y?a[0]*a[1]*a[2]:a[0]*a[2]*a[3],U=K(e,[1,B,n.inChannels]),Y=K(t,[1,n.inChannels,n.outChannels]),q=this.fusedBatchMatMul({a:U,b:Y,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o});return K(q,n.outShape)}const 
v=y?a[0]*a[1]*(a[2]+1):a[0]*a[2]*(a[3]+1),N={dataId:e.dataId,shape:[1,v,n.inChannels],dtype:e.dtype},E=c.shape;c.shape=c.shape.slice(),c.shape[c.shape.length-2]++,k(gm(c.shape,N.shape),()=>`packed reshape ${c.shape} to ${N.shape} isn't free`);const D=K(t,[1,n.inChannels,n.outChannels]),F=this.fusedBatchMatMul({a:N,b:D,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o}),_=this.texData.get(F.dataId);return k(_.isPacked,()=>"batchMatMul result is expected to be packed"),c.shape=E,_.shape=n.outShape,$s().makeTensorFromDataId(F.dataId,n.outShape,F.dtype)}conv2dWithIm2Row(e,t,n,s,i,o){const{filterWidth:a,filterHeight:c,inChannels:u,outWidth:p,outHeight:m,dataFormat:y}=n,b=y==="channelsLast",w=a*c*u,I=m*p,T=[w,I],v=!0,N=!1,E=e.squeeze([0]),D=t.reshape([1,w,-1]),F=new R8(T,E.shape,n),_=this.compileAndRun(F,[E]).reshape([1,T[0],T[1]]),B=s!=null,U=o!=null,Y=i?Tm(i,!0):null,q=new LS(_.shape,[1,I,n.outChannels],v,N,B,Y,U),J=[_,D];s&&J.push(s),U&&J.push(o);const oe=this.compileAndRun(q,J);return b?oe.reshape([1,m,p,n.outChannels]):oe.reshape([1,n.outChannels,m,p])}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n,s,i,o);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n,s,i,o);const a=s!=null,c=o!=null,u=i?Tm(i,!1):null,p=new uC(n,a,u,c),m=[e,t];return s&&m.push(s),o&&m.push(o),this.compileAndRun(p,m)}conv2d(e,t,n){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n);const s=new uC(n);return this.compileAndRun(s,[e,t])}conv2dDerInput(e,t,n){const s=new G5(n);return this.compileAndRun(s,[e,t])}conv2dDerFilter(e,t,n){const s=new z5(n);return this.compileAndRun(s,[e,t])}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){const a=C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1,c=i?Tm(i,a):null,u=[e,t],p=s!=null,m=o!=null;p&&u.push(s),m&&u.push(o);let y;return a?(y=new pC(n,p,c,m),this.compileAndRun(y,u)):(y=new dC(n,p,c,m),this.compileAndRun(y,u))}depthwiseConv2D(e,t,n){let s;return C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1?(s=new pC(n),this.compileAndRun(s,[e,t])):(s=new dC(n),this.compileAndRun(s,[e,t]))}depthwiseConv2DDerInput(e,t,n){const s=new q5(n);return this.compileAndRun(s,[e,t])}depthwiseConv2DDerFilter(e,t,n){const s=new Y5(n);return this.compileAndRun(s,[e,t])}conv3d(e,t,n){const s=new j5(n);return this.compileAndRun(s,[e,t])}conv3dDerInput(e,t,n){const s=new H5(n);return this.compileAndRun(s,[e,t])}conv3dDerFilter(e,t,n){const s=new V5(n);return this.compileAndRun(s,[e,t])}cast(e,t){return ov(e,t,this)}unstack(e,t){const n=e.shape[t],s=new Array(e.rank-1);let i=0;for(let u=0;u1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=n==="NHWC"?e.shape[1]:e.shape[2],o=n==="NHWC"?e.shape[2]:e.shape[3],a=n==="NHWC"?e.shape[3]:e.shape[1],c=i*t,u=o*t,p=a/(t*t),m=n==="NHWC"?[s,c,u,p]:[s,p,c,u],y=new Z5(m,t,n);return this.compileAndRun(y,[e])}split(e,t,n){return 
M6(e,t,n)}scatterND(e,t,n){const{sliceRank:s,numUpdates:i,sliceSize:o,strides:a,outputSize:c}=Ua(t,e,n),u=[c/o,o],p=e.reshape([i,s]),m=t.reshape([i,o]);if(c===0)return av(en([]),n);const y=Ne(0),b=new NC(i,s,p.rank,m.rank,a,u),w=this.compileAndRun(b,[m,p,y]);return w.reshape(n)}sparseToDense(e,t,n,s){const{sliceRank:i,numUpdates:o,strides:a,outputSize:c}=Ua(t,e,n),u=!1,p=new NC(o,i,e.rank,t.rank,a,[c,1],u),m=this.compileAndRun(p,[t,e,s]);return m.reshape(n)}fft(e){const t=!1;return this.fftImpl(e,t)}ifft(e){const t=!0;return this.fftImpl(e,t)}fftImpl(e,t){const n=this.texData.get(e.dataId),s=new bC(yC.REAL,e.shape,t),i=new bC(yC.IMAG,e.shape,t),o=[this.makeComplexComponentTensorInfo(e,n.complexTensors.real),this.makeComplexComponentTensorInfo(e,n.complexTensors.imag)],a=this.compileAndRun(s,o),c=this.compileAndRun(i,o),u=this.complex(a,c).as2D(e.shape[0],e.shape[1]);return a.dispose(),c.dispose(),u}gatherND(e,t){const n=t.shape,s=n[n.length-1],[i,o,a,c]=Fd(e,t),u=t.reshape([o,s]),p=e.reshape([e.size/a,a]),m=new a8(s,c,[o,a]),y=this.compileAndRun(m,[p,u]);return y.reshape(i)}fill(e,t,n){if(n=n||Oa(t),n==="string"){const s=So(n,we(e));return s.fill(t),$s().makeTensor(s,e,n,this)}else{const s=new i8(e,t),i=s.getCustomSetupFunc(t);return this.compileAndRun(s,[],n,i)}}onesLike(e){if(e.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(e.shape,1,e.dtype)}zerosLike(e){return this.fill(e.shape,e.dtype==="string"?"":0,e.dtype)}linspace(e,t,n){return sw(e,t,n)}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return $s().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new B6(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new $8(e.shape),n=!0;return this.runWebGLProgram(t,[e],e.dtype,null,n)}packedReshape(e,t){const n=[gc(e.shape),...yc(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},i=[gc(t),...yc(t)],o=new vC(i,n),a=!0,c=this.runWebGLProgram(o,[s],e.dtype,null,a);return{dataId:c.dataId,shape:t,dtype:c.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:i}=t,o=fS(s);let a;n?a=new J5(o):a=new X5(o);const c=!0,u=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:e}],i,null,c);return{dtype:i,shape:s,dataId:u.dataId}}runWebGLProgram(e,t,n,s,i=!1){const o=this.makeTensorInfo(e.outputShape,n),a=this.texData.get(o.dataId);if(e.packedOutput&&(a.isPacked=!0),e.outPackingScheme===au.DENSE){const I=lu(e.outputShape);a.texShape=I.map(T=>T*2)}if(e.outTexUsage!=null&&(a.usage=e.outTexUsage),we(o.shape)===0)return a.values=wn(o.dtype,0),o;const c=[],u=t.map(I=>{if(I.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let T=this.texData.get(I.dataId);if(T.texture==null){if(!e.packedInputs&&we(I.shape)<=C().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:I.shape,texData:null,isUniform:!0,uniformValues:T.values};e.packedInputs&&(T.isPacked=!0,T.shape=I.shape)}else if(!!T.isPacked!==!!e.packedInputs)I=T.isPacked?this.unpackTensor(I):this.packTensor(I),c.push(I),T=this.texData.get(I.dataId);else if(T.isPacked&&!gm(T.shape,I.shape)){const v=I,N=I.shape;I.shape=T.shape,I=this.packedReshape(I,N),c.push(I),T=this.texData.get(I.dataId),v.shape=N}return this.uploadToGPU(I.dataId),{shape:I.shape,texData:T,isUniform:!1}});this.uploadToGPU(o.dataId);const p={shape:o.shape,texData:a,isUniform:!1},m=C8(e,u,p),y=this.getAndSaveBinary(m,()=>v8(this.gpgpu,e,u,p)),b=this.activeTimers!=null;let w;if(b&&(w=this.startTimer()),N8(this.gpgpu,y,u,p,s),c.forEach(I=>this.disposeIntermediateTensorInfo(I)),b&&(w=this.endTimer(w),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(w)})),!C().getBool("WEBGL_LAZILY_UNPACK")&&a.isPacked&&i===!1){const I=this.unpackTensor(o);return this.disposeIntermediateTensorInfo(o),I}return o}compileAndRun(e,t,n,s,i=!1){n=n||t[0].dtype;const o=this.runWebGLProgram(e,t,n,s,i);return $s().makeTensorFromDataId(o.dataId,o.shape,o.dtype)}getAndSaveBinary(e,t){return e in this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!C().getBool("IS_TEST")){const e=Object.keys(this.binaryCache);e.forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=ee(()=>{if(!C().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=C().getBool("DEBUG");C().set("DEBUG",!1);const t=this.abs(Ne(1e-8)).dataSync()[0];if(C().set("DEBUG",e),t>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?V6:H6}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:i,texture:o,usage:a,isPacked:c}=t;if(o!=null)return;const u=this.activeTimers!=null;let p;u&&(p=qn());let m=t.texShape;if(m==null&&(m=Qj(n,c),t.texShape=m),i!=null){const y=fS(n);let b,w=m[1],I=m[0];const T=i instanceof Uint8Array;c?([w,I]=fc(m[0],m[1]),b=new s8(y,[I,w],T)):b=new n8(y,[I,w],T);const v=this.makeTensorInfo([I,w],s);T?this.texData.get(v.dataId).usage=Cs.PIXELS:this.texData.get(v.dataId).usage=Cs.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(v.dataId),w,I,i);const N=!0,E=this.runWebGLProgram(b,[v],s,null,N),D=this.texData.get(E.dataId);t.texture=D.texture,t.texShape=D.texShape,t.isPacked=D.isPacked,t.usage=D.usage,this.disposeIntermediateTensorInfo(v),this.texData.delete(E.dataId),t.values=null,u&&(this.uploadWaitMs+=qn()-p)}else{const y=this.acquireTexture(m,a,s,c);t.texture=y}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),t!=null&&(n.values=J6(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){const 
i=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${i} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*Ty(t)}tryRunOnCpuOrThrow(e,t){if(this.shouldExecuteOnCPU(e))try{return t()}catch(n){if(C().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function J6(e,t){if(t==="float32"||t==="complex64")return e;if(t==="int32"||t==="bool"){const n=t==="int32"?new Int32Array(e.length):new Uint8Array(e.length);for(let s=0;snew X6,2);const dte={forceHalfFloat:Q6},HC="if (isnan(x)) return x;",e7=` if (isnan(a)) return a; if (isnan(b)) return b; `,t7=` @@ -3649,14 +3649,14 @@ return (round(mod(b, 2.0)) != 1) ? result.g = isNaN.g > 0. ? NAN : result.g; result.b = isNaN.b > 0. ? NAN : result.b; result.a = isNaN.a > 0. ? NAN : result.a; -`;function Am(e){return({inputs:t,backend:n})=>{const{x:s}=t,i=n,o=new it(s.shape,e);return i.runWebGLProgram(o,[s],s.dtype)}}function xS(e,t,n,s){return({inputs:i,backend:o})=>{const{a,b:c}=i,u=o,p=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(t,a.shape,c.shape,!!n):new hn(e,a.shape,c.shape),m=s||a.dtype,y=u.runWebGLProgram(p,[a,c],m);return y}}const n7=e7+` +`;function Am(e){return({inputs:t,backend:n})=>{const{x:s}=t,i=n,o=new it(s.shape,e);return i.runWebGLProgram(o,[s],s.dtype)}}function xS(e,t,n,s){return({inputs:i,backend:o})=>{const{a,b:c}=i,u=o,p=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new to(t,a.shape,c.shape,!!n):new un(e,a.shape,c.shape),m=s||a.dtype,y=u.runWebGLProgram(p,[a,c],m);return y}}const n7=e7+` return atan(a, b); `,s7=` vec4 result = atan(a, b); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); `+t7+` return result; -`,i7=xS(n7,s7),r7={kernelName:Ai,backendName:"webgl",kernelFunc:i7};function TS(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const o7={kernelName:Sl,backendName:"webgl",kernelFunc:TS};function a7(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;hu(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:u}=s,p=1;k(rn(a,p),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return TS({inputs:{x:i},backend:n});const y=new du(m,"avg",!1);return n.runWebGLProgram(y,[i],"float32")}const c7={kernelName:ei,backendName:"webgl",kernelFunc:a7};function l7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;hu([i,o],"avgPoolBackprop");const{filterSize:c,strides:u,pad:p}=s,m=Wn(a.shape,c,u,1,p),y=new a5(m);return n.runWebGLProgram(y,[i],a.dtype)}const h7={kernelName:Sa,backendName:"webgl",kernelFunc:l7};class u7{constructor(e,t,n,s,i,o){this.outputShape=[],this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="0.0";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="1.0";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` +`,i7=xS(n7,s7),r7={kernelName:Ai,backendName:"webgl",kernelFunc:i7};function TS(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const o7={kernelName:Sl,backendName:"webgl",kernelFunc:TS};function a7(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;hu(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:u}=s,p=1;k(on(a,p),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return TS({inputs:{x:i},backend:n});const y=new du(m,"avg",!1);return n.runWebGLProgram(y,[i],"float32")}const c7={kernelName:ei,backendName:"webgl",kernelFunc:a7};function l7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;hu([i,o],"avgPoolBackprop");const{filterSize:c,strides:u,pad:p}=s,m=Wn(a.shape,c,u,1,p),y=new a5(m);return n.runWebGLProgram(y,[i],a.dtype)}const h7={kernelName:xa,backendName:"webgl",kernelFunc:l7};class u7{constructor(e,t,n,s,i,o){this.outputShape=[],this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="0.0";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="1.0";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` void main() { float x = getXAtOutCoords(); float mean = getMeanAtOutCoords(); @@ -3681,7 +3681,7 @@ return (round(mod(b, 2.0)) != 1) ? 
} `}}const p7=({inputs:e,backend:t,attrs:n})=>{const{x:s,mean:i,variance:o,offset:a,scale:c}=e;k(i.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(a==null||i.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(c==null||i.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:u}=n;u==null&&(u=.001);const p=[s,i,o];let m=null;a!=null&&(m=a.shape,p.push(a));let y=null;c!=null&&(y=c.shape,p.push(c));const b=C().getBool("WEBGL_PACK_NORMALIZATION")?new d7(s.shape,i.shape,o.shape,m,y,u):new u7(s.shape,i.shape,o.shape,m,y,u),w=t.runWebGLProgram(b,p,p[0].dtype);return w},m7={kernelName:Ll,backendName:"webgl",kernelFunc:p7};const f7=HC+` return cos(x); -`,g7=Am(f7),y7={kernelName:Ia,backendName:"webgl",kernelFunc:g7};const b7=` +`,g7=Am(f7),y7={kernelName:Ta,backendName:"webgl",kernelFunc:g7};const b7=` if (a == b) { return 1.0; }; @@ -3703,7 +3703,7 @@ return a / b;`,w7=` } return result; -`,L7=xS(b7,w7,!0),S7={kernelName:xa,backendName:"webgl",kernelFunc:L7};class I7{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=` +`,L7=xS(b7,w7,!0),S7={kernelName:Aa,backendName:"webgl",kernelFunc:L7};class I7{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; @@ -3773,7 +3773,7 @@ return a / b;`,w7=` ${t.output} = result; } - `}}const v7={kernelName:yd,backendName:"webgl",kernelFunc:N7};let Lc;function N7(e){const{inputs:t,backend:n,attrs:s}=e;let{pixels:i}=t;const{numChannels:o}=s,a=typeof HTMLVideoElement!="undefined"&&i instanceof HTMLVideoElement,c=typeof HTMLImageElement!="undefined"&&i instanceof HTMLImageElement,[u,p]=a?[i.videoWidth,i.videoHeight]:[i.width,i.height],m=[p,u],y=[p,u,o];(c||a)&&(Lc==null&&(Lc=document.createElement("canvas").getContext("2d")),Lc.canvas.width=u,Lc.canvas.height=p,Lc.drawImage(i,0,0,u,p),i=Lc.canvas);const b=n.makeTensorInfo(m,"int32");n.texData.get(b.dataId).usage=Cs.PIXELS,n.gpgpu.uploadPixelDataToTexture(n.getTexture(b.dataId),i);const w=C().getBool("WEBGL_PACK")?new A7(y):new T7(y),I=n.runWebGLProgram(w,[b],"int32");return n.disposeData(b.dataId),I}function C7(e){const t=[];for(;t.length===0||t[t.length-1].outSize!==1;){const n=t.length?t[t.length-1].outSize:e[1],s=uh(n);t.push({inSize:n,windowSize:s,outSize:Math.ceil(n/s)})}return t}function R7(e,t,n,s){const i=C7(e.shape);let o=e;for(let a=0;a`The new shape (${u}) has ${p} elements and the old shape (${i.shape}) has ${c} elements. The new shape and old shape must have the same number of elements.`);const m=a.texData.get(i.dataId);return m.isPacked&&!gm(i.shape,u)&&!(m.texture!==null&&gm(m.shape,u))?O7(i,u,a):(a.incRef(i.dataId),{dataId:i.dataId,shape:u,dtype:i.dtype})}const E7={kernelName:El,backendName:"webgl",kernelFunc:AS};function D7(e,t,n,s){const i=we(t),o=we(e.shape),a=o/i,c=AS({inputs:{x:e},attrs:{shape:[a,i]},backend:s}),u=R7(c,e.dtype,"max",s),p=AS({inputs:{x:u},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(c),s.disposeIntermediateTensorInfo(u),p}class k7{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o`The new shape (${u}) has ${p} elements and the old shape (${i.shape}) has ${c} elements. 
The new shape and old shape must have the same number of elements.`);const m=a.texData.get(i.dataId);return m.isPacked&&!gm(i.shape,u)&&!(m.texture!==null&&gm(m.shape,u))?O7(i,u,a):(a.incRef(i.dataId),{dataId:i.dataId,shape:u,dtype:i.dtype})}const E7={kernelName:El,backendName:"webgl",kernelFunc:AS};function D7(e,t,n,s){const i=we(t),o=we(e.shape),a=o/i,c=AS({inputs:{x:e},attrs:{shape:[a,i]},backend:s}),u=R7(c,e.dtype,"max",s),p=AS({inputs:{x:u},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(c),s.disposeIntermediateTensorInfo(u),p}class k7{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n,c=s.shape.length,u=ft(i,s.shape);let p=u;const m=_n(p,c),y=m!=null,b=a.shouldExecuteOnCPU([s]);let w=s;if(y){if(b){const E=a.texData.get(w.dataId),D=E.values,F=new Array(c);for(let U=0;U`Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return TS({inputs:{x:i},backend:n});const y=new du(m,"max",!1);return n.runWebGLProgram(y,[i],i.dtype)}const U7={kernelName:Cl,backendName:"webgl",kernelFunc:$7};function B7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;hu([o,a],"maxPoolBackprop");const{filterSize:u,strides:p,pad:m,dimRoundingMode:y}=s,b=Wn(c.shape,u,p,1,m,y),w=!0,I=new du(b,"max",w),T=n.runWebGLProgram(I,[c],c.dtype),v=new k8(b),N=n.runWebGLProgram(v,[i,T],c.dtype);return n.disposeIntermediateTensorInfo(T),N}const M7={kernelName:ad,backendName:"webgl",kernelFunc:B7};function P7(e,t,n,s){let i=new du(n,"max",!1);const o=s.runWebGLProgram(i,[e],"float32");i=new du(n,"max",!0,!0,t);const a=s.runWebGLProgram(i,[e],"float32");return[o,a]}const z7={kernelName:cd,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,u=n;k(s.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`);const p=[1,1];k(rn(o,p),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${o} and dilations '${p}'`);const m=Wn(s.shape,i,o,p,a),[y,b]=P7(s,c,m,u);return[y,b]}};const G7={kernelName:Kg,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Ja("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=n,u=t,p=u.readSync(s.dataId),m=u.readSync(i.dataId),y=o,b=a,w=c;return wp(p,m,y,b,w)}};const V7=Lp,H7={kernelName:hd,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Ja("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:u}=n,p=t,m=p.readSync(s.dataId),y=p.readSync(i.dataId),{selectedIndices:b,validOutputs:w}=V7(m,y,o,a,c,u);return[b,w]}};const Y7=Sp,q7={kernelName:ud,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Ja("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=n,p=t,m=p.readSync(s.dataId),y=p.readSync(i.dataId),b=o,w=a,I=c,T=u,{selectedIndices:v,selectedScores:N}=Y7(m,y,b,w,I,T);return[v,N]}};class j7{constructor(e,t,n,s){this.variableNames=["Image"],this.outputShape=[];const i=e[1],o=e[2],a=Math.sin(t).toFixed(3),c=Math.cos(t).toFixed(3);this.outputShape=e;const[u,p]=qb(s,i,o),m=u.toFixed(3),y=p.toFixed(3);let b="";typeof n=="number"?b=`float outputValue = ${n.toFixed(2)};`:b=` + `}}function YC(e,t,n){const s=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new _7(e.shape,t):new k7(e.shape,t);return n.runWebGLProgram(s,[e],e.dtype)}const W7={kernelName:Nl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n,c=s.shape.length,u=ft(i,s.shape);let p=u;const m=_n(p,c),y=m!=null,b=a.shouldExecuteOnCPU([s]);let w=s;if(y){if(b){const E=a.texData.get(w.dataId),D=E.values,F=new Array(c);for(let U=0;U`Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${p}'`);const m=Wn(i.shape,o,a,p,c,u);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return TS({inputs:{x:i},backend:n});const y=new du(m,"max",!1);return n.runWebGLProgram(y,[i],i.dtype)}const U7={kernelName:Cl,backendName:"webgl",kernelFunc:$7};function B7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;hu([o,a],"maxPoolBackprop");const{filterSize:u,strides:p,pad:m,dimRoundingMode:y}=s,b=Wn(c.shape,u,p,1,m,y),w=!0,I=new du(b,"max",w),T=n.runWebGLProgram(I,[c],c.dtype),v=new k8(b),N=n.runWebGLProgram(v,[i,T],c.dtype);return n.disposeIntermediateTensorInfo(T),N}const M7={kernelName:ad,backendName:"webgl",kernelFunc:B7};function P7(e,t,n,s){let i=new du(n,"max",!1);const o=s.runWebGLProgram(i,[e],"float32");i=new du(n,"max",!0,!0,t);const a=s.runWebGLProgram(i,[e],"float32");return[o,a]}const z7={kernelName:cd,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,u=n;k(s.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`);const p=[1,1];k(on(o,p),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${o} and dilations '${p}'`);const m=Wn(s.shape,i,o,p,a),[y,b]=P7(s,c,m,u);return[y,b]}};const G7={kernelName:Kg,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Qa("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=n,u=t,p=u.readSync(s.dataId),m=u.readSync(i.dataId),y=o,b=a,w=c;return wp(p,m,y,b,w)}};const V7=Lp,H7={kernelName:hd,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Qa("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:u}=n,p=t,m=p.readSync(s.dataId),y=p.readSync(i.dataId),{selectedIndices:b,validOutputs:w}=V7(m,y,o,a,c,u);return[b,w]}};const Y7=Sp,q7={kernelName:ud,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Qa("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:u}=n,p=t,m=p.readSync(s.dataId),y=p.readSync(i.dataId),b=o,w=a,I=c,T=u,{selectedIndices:v,selectedScores:N}=Y7(m,y,b,w,I,T);return[v,N]}};class j7{constructor(e,t,n,s){this.variableNames=["Image"],this.outputShape=[];const i=e[1],o=e[2],a=Math.sin(t).toFixed(3),c=Math.cos(t).toFixed(3);this.outputShape=e;const[u,p]=qb(s,i,o),m=u.toFixed(3),y=p.toFixed(3);let b="";typeof n=="number"?b=`float outputValue = ${n.toFixed(2)};`:b=` vec3 fill = vec3(${n.join(",")}); float outputValue = fill[coords[3]];`,this.userCode=` void main() { @@ -3814,12 +3814,12 @@ return a / b;`,w7=` } `}}const K7={kernelName:bd,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,u=new j7(s.shape,i,o,a),p=c.runWebGLProgram(u,[s],s.dtype);return p}};const X7=HC+` return sin(x); -`,J7=Am(X7),Z7={kernelName:Ta,backendName:"webgl",kernelFunc:J7};const Q7="return x * x;",eX=Am(Q7),tX={kernelName:fd,backendName:"webgl",kernelFunc:eX};const qC="return (a - b) * (a - b);",nX=xS(qC,qC),sX={kernelName:Aa,backendName:"webgl",kernelFunc:nX};const iX="return tan(x);",rX=Am(iX),oX={kernelName:va,backendName:"webgl",kernelFunc:rX};const aX={kernelName:zl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{perm:i}=t,o=n,a=s.shape.length,c=new Array(a);for(let p=0;p{Tc(EX,{isNodejs:()=>DX});function DX(){return typeof global=="object"&&!0&&typeof o2!="undefined"&&typeof process!="undefined"&&!!process.version}});function gr(r,l,h=!1){if(r.beginPath(),l.slice(1).forEach(({x:d,y:f},g)=>{const S=l[g];r.moveTo(S.x,S.y),r.lineTo(d,f)}),h){const d=l[l.length-1],f=l[0];if(!d||!f)return;r.moveTo(d.x,d.y),r.lineTo(f.x,f.y)}r.stroke()}class ms{constructor(r,l){if(!gi(r)||!gi(l))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width:r,height:l})}`);this._width=r,this._height=l}get width(){return this._width}get height(){return this._height}reverse(){return new ms(1/this.width,1/this.height)}}const US={};Tc(US,{computeReshapedDimensions:()=>PS,getCenterPoint:()=>Qo,isDimensions:()=>Dm,isEven:()=>Em,isFloat:()=>MS,isTensor:()=>Jo,isTensor1D:()=>kX,isTensor2D:()=>BS,isTensor3D:()=>yr,isTensor4D:()=>Os,isValidNumber:()=>gi,isValidProbablitiy:()=>Nc,range:()=>zi,round:()=>Zo});const c2=Ye(Je());function Jo(r,l){return r instanceof c2.Tensor&&r.shape.length===l}function kX(r){return Jo(r,1)}function BS(r){return Jo(r,2)}function yr(r){return Jo(r,3)}function Os(r){return Jo(r,4)}function MS(r){return r%1!==0}function Em(r){return r%2===0}function Zo(r,l=2){const h=Math.pow(10,l);return Math.floor(r*h)/h}function Dm(r){return r&&r.width&&r.height}function PS({width:r,height:l},h){const d=h/Math.max(l,r);return new ms(Math.round(r*d),Math.round(l*d))}function Qo(r){return r.reduce((l,h)=>l.add(h),new Ze(0,0)).div(new Ze(r.length,r.length))}function zi(r,l,h){return Array(r).fill(0).map((d,f)=>l+f*h)}function gi(r){return!!r&&r!==Infinity&&r!==-Infinity&&!isNaN(r)||r===0}function Nc(r){return gi(r)&&0<=r&&r<=1}class Ze{constructor(r,l){this._x=r,this._y=l}get x(){return this._x}get y(){return this._y}add(r){return new Ze(this.x+r.x,this.y+r.y)}sub(r){return new Ze(this.x-r.x,this.y-r.y)}mul(r){return new Ze(this.x*r.x,this.y*r.y)}div(r){return new Ze(this.x/r.x,this.y/r.y)}abs(){return new Ze(Math.abs(this.x),Math.abs(this.y))}magnitude(){return 
Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Ze(Math.floor(this.x),Math.floor(this.y))}}class Ct{static isRect(r){return!!r&&[r.x,r.y,r.width,r.height].every(gi)}static assertIsValidBox(r,l,h=!1){if(!Ct.isRect(r))throw new Error(`${l} - invalid box: ${JSON.stringify(r)}, expected object with properties x, y, width, height`);if(!h&&(r.width<0||r.height<0))throw new Error(`${l} - width (${r.width}) and height (${r.height}) must be positive numbers`)}constructor(r,l=!0){const h=r||{},d=[h.left,h.top,h.right,h.bottom].every(gi),f=[h.x,h.y,h.width,h.height].every(gi);if(!f&&!d)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(h)}`);const[g,S,L,x]=f?[h.x,h.y,h.width,h.height]:[h.left,h.top,h.right-h.left,h.bottom-h.top];Ct.assertIsValidBox({x:g,y:S,width:L,height:x},"Box.constructor",l),this._x=g,this._y=S,this._width=L,this._height=x}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Ze(this.left,this.top)}get topRight(){return new Ze(this.right,this.top)}get bottomLeft(){return new Ze(this.left,this.bottom)}get bottomRight(){return new Ze(this.right,this.bottom)}round(){const[r,l,h,d]=[this.x,this.y,this.width,this.height].map(f=>Math.round(f));return new Ct({x:r,y:l,width:h,height:d})}floor(){const[r,l,h,d]=[this.x,this.y,this.width,this.height].map(f=>Math.floor(f));return new Ct({x:r,y:l,width:h,height:d})}toSquare(){let{x:r,y:l,width:h,height:d}=this;const f=Math.abs(h-d);return hl&&(S=-O+l+h,O=l),C>r&&(L=-C+r+d,C=r),x<1&&(L=2-x,x=1),A<1&&(L=2-A,A=1),{dy:g,edy:L,dx:f,edx:S,y:A,ey:C,x,ex:O,w:h,h:d}}calibrate(r){return new Ct({left:this.left+r.left*this.width,top:this.top+r.top*this.height,right:this.right+r.right*this.width,bottom:this.bottom+r.bottom*this.height}).toSquare().round()}}class wu extends Ct{constructor(r,l,h,d,f=!1){super({left:r,top:l,right:h,bottom:d},f)}}class Cc{constructor(r,l,h,d,f){this._imageDims=new ms(f.width,f.height),this._score=r,this._classScore=l,this._className=h,this._box=new Ct(d).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Ct(this._box).rescale(this.imageDims.reverse())}forSize(r,l){return new Cc(this.score,this.classScore,this.className,this.relativeBox,{width:r,height:l})}}class Yt extends Cc{constructor(r,l,h){super(r,r,"",l,h)}forSize(r,l){const{score:h,relativeBox:d,imageDims:f}=super.forSize(r,l);return new Yt(h,d,f)}}function zS(r,l,h=!0){const d=Math.max(0,Math.min(r.right,l.right)-Math.max(r.left,l.left)),f=Math.max(0,Math.min(r.bottom,l.bottom)-Math.max(r.top,l.top)),g=d*f;return h?g/(r.area+l.area-g):g/Math.min(r.area,l.area)}function GS(r){const l=r.map(L=>L.x),h=r.map(L=>L.y),d=l.reduce((L,x)=>xxLL({score:S,boxIndex:L})).sort((S,L)=>S.score-L.score).map(S=>S.boxIndex);const g=[];for(;f.length>0;){const S=f.pop();g.push(S);const L=f,x=[];for(let A=0;Ax[O]<=h)}return g}const Gi=Ye(Je());function yi(r,l){return 
Gi.tidy(()=>{const[h,d,f]=l,g=Gi.fill([...r.shape.slice(0,3),1],h),S=Gi.fill([...r.shape.slice(0,3),1],d),L=Gi.fill([...r.shape.slice(0,3),1],f),x=Gi.concat([g,S,L],3);return Gi.sub(r,x)})}const ro=Ye(Je());function HS(r,l=!1){return ro.tidy(()=>{const[h,d]=r.shape.slice(1);if(h===d)return r;const f=Math.abs(h-d),g=Math.round(f*(l?.5:1)),S=h>d?2:1,L=$=>{const z=r.shape.slice();return z[S]=$,ro.fill(z,0)},x=L(g),A=f-x.shape[S],O=l&&A?L(A):null,C=[O,r,x].filter($=>!!$).map($=>ro.cast($,"float32"));return ro.concat(C,S)})}function FX(r){const l=r.slice();for(let h=l.length-1;h>0;h--){const d=Math.floor(Math.random()*(h+1)),f=l[h];l[h]=l[d],l[d]=f}return l}function Lu(r){return 1/(1+Math.exp(-r))}function _X(r){return Math.log(r/(1-r))}class Su extends Ct{constructor(r,l,h,d,f=!1){super({x:r,y:l,width:h,height:d},f)}}const WX=.5,$X=.43,UX=.45;class qs{constructor(r,l,h=new Ze(0,0)){const{width:d,height:f}=l;this._imgDims=new ms(d,f),this._shift=h,this._positions=r.map(g=>g.mul(new Ze(d,f)).add(h))}get shift(){return new Ze(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(r=>r.sub(this._shift).div(new Ze(this.imageWidth,this.imageHeight)))}forSize(r,l){return new this.constructor(this.relativePositions,{width:r,height:l})}shiftBy(r,l){return new this.constructor(this.relativePositions,this._imgDims,new Ze(r,l))}shiftByPoint(r){return this.shiftBy(r.x,r.y)}align(r,l={}){if(r){const f=r instanceof Yt?r.box.floor():new Ct(r);return this.shiftBy(f.x,f.y).align(null,l)}const{useDlibAlignment:h,minBoxPadding:d}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},l);return h?this.alignDlib():this.alignMinBbox(d)}alignDlib(){const r=this.getRefPointsForAlignment(),[l,h,d]=r,f=O=>d.sub(O).magnitude(),g=(f(l)+f(h))/2,S=Math.floor(g/UX),L=Qo(r),x=Math.floor(Math.max(0,L.x-WX*S)),A=Math.floor(Math.max(0,L.y-$X*S));return new Su(x,A,Math.min(S,this.imageWidth+x),Math.min(S,this.imageHeight+A))}alignMinBbox(r){const l=GS(this.positions);return l.pad(l.width*r,l.height*r)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}}class BX extends qs{getRefPointsForAlignment(){const r=this.positions;return[r[0],r[1],Qo([r[3],r[4]])]}}class Iu extends qs{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(Qo)}}class km{constructor(r,l){this._label=r,this._distance=l}get label(){return this._label}get distance(){return this._distance}toString(r=!0){return`${this.label}${r?` (${Zo(this.distance)})`:""}`}}class Fm extends Ct{static assertIsValidLabeledBox(r,l){if(Ct.assertIsValidBox(r,l),!gi(r.label))throw new Error(`${l} - expected property label (${r.label}) to be a number`)}constructor(r,l){super(r);this._label=l}get label(){return this._label}}class ea{constructor(r,l){if(!(typeof r=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(l)||l.some(h=>!(h instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of 
Float32Array");this._label=r,this._descriptors=l}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(r=>Array.from(r))}}static fromJSON(r){const l=r.descriptors.map(h=>new Float32Array(h));return new ea(r.label,l)}}class MX extends Fm{static assertIsValidPredictedBox(r,l){if(Fm.assertIsValidLabeledBox(r,l),!Nc(r.score)||!Nc(r.classScore))throw new Error(`${l} - expected properties score (${r.score}) and (${r.classScore}) to be a number between [0, 1]`)}constructor(r,l,h,d){super(r,l);this._score=h,this._classScore=d}get score(){return this._score}get classScore(){return this._classScore}}function Vi(r){return r.detection instanceof Yt}function ta(r,l){const h={detection:l};return Object.assign({},r,h)}function YS(){const r=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},l=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:r,readFile:l}}function _m(r){let l="";if(!r)try{r=require("fs")}catch(d){l=d.toString()}const h=r?function(d){return new Promise((f,g)=>{r.readFile(d,function(S,L){return S?g(S):f(L)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${l}`)};return{readFile:h}}function qS(){const r=global.Canvas||global.HTMLCanvasElement,l=global.Image||global.HTMLImageElement,h=function(){if(r)return new r;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},d=function(){if(l)return new l;throw new Error("createImageElement - missing Image implementation for nodejs environment")},f=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},g=_m();return{Canvas:r||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:l||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:h,createImageElement:d,fetch:f,...g}}function jS(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}const KS=Ye(a2());let yn;function PX(){if(!yn)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return yn}function XS(r){yn=r}function JS(){if(jS())return XS(YS());if(KS.isNodejs())return XS(qS())}function zX(r){if(yn||JS(),!yn)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");const{Canvas:l=yn.Canvas,Image:h=yn.Image}=r;yn.Canvas=l,yn.Image=h,yn.createCanvasElement=r.createCanvasElement||(()=>new l),yn.createImageElement=r.createImageElement||(()=>new h),yn.ImageData=r.ImageData||yn.ImageData,yn.Video=r.Video||yn.Video,yn.fetch=r.fetch||yn.fetch,yn.readFile=r.readFile||yn.readFile}const gt={getEnv:PX,setEnv:XS,initialize:JS,createBrowserEnv:YS,createFileSystem:_m,createNodejsEnv:qS,monkeyPatch:zX,isBrowser:jS,isNodejs:KS.isNodejs};JS();function na(r){return!gt.isNodejs()&&typeof r=="string"?document.getElementById(r):r}function es(r){const{Canvas:l,CanvasRenderingContext2D:h}=gt.getEnv();if(r instanceof h)return r;const 
d=na(r);if(!(d instanceof l))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");const f=d.getContext("2d");if(!f)throw new Error("resolveContext2d - canvas 2d context is null");return f}var Hi;(function(r){r.TOP_LEFT="TOP_LEFT",r.TOP_RIGHT="TOP_RIGHT",r.BOTTOM_LEFT="BOTTOM_LEFT",r.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Hi||(Hi={}));class Wm{constructor(r={}){const{anchorPosition:l,backgroundColor:h,fontColor:d,fontSize:f,fontStyle:g,padding:S}=r;this.anchorPosition=l||Hi.TOP_LEFT,this.backgroundColor=h||"rgba(0, 0, 0, 0.5)",this.fontColor=d||"rgba(255, 255, 255, 1)",this.fontSize=f||14,this.fontStyle=g||"Georgia",this.padding=S||4}}class Rc{constructor(r,l,h={}){this.text=typeof r=="string"?[r]:r instanceof Rc?r.text:r,this.anchor=l,this.options=new Wm(h)}measureWidth(r){const{padding:l}=this.options;return this.text.map(h=>r.measureText(h).width).reduce((h,d)=>h{const z=L+O.x,ne=L+O.y+($+1)*g;h.fillText(C,z,ne)})}}class l2{constructor(r={}){const{boxColor:l,lineWidth:h,label:d,drawLabelOptions:f}=r;this.boxColor=l||"rgba(0, 0, 255, 1)",this.lineWidth=h||2,this.label=d;const g={anchorPosition:Hi.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new Wm(Object.assign({},g,f))}}class ZS{constructor(r,l={}){this.box=new Ct(r),this.options=new l2(l)}draw(r){const l=es(r),{boxColor:h,lineWidth:d}=this.options,{x:f,y:g,width:S,height:L}=this.box;l.strokeStyle=h,l.lineWidth=d,l.strokeRect(f,g,S,L);const{label:x}=this.options;x&&new Rc([x],{x:f-d/2,y:g},this.options.drawLabelOptions).draw(r)}}function GX(r,l){const h=Array.isArray(l)?l:[l];h.forEach(d=>{const f=d instanceof Yt?d.score:Vi(d)?d.detection.score:void 0,g=d instanceof Yt?d.box:Vi(d)?d.detection.box:new Ct(d),S=f?`${Zo(f)}`:void 0;new ZS(g,{label:S}).draw(r)})}function xu(r){const{Image:l,Video:h}=gt.getEnv();return r instanceof l&&r.complete||r instanceof h&&r.readyState>=3}function QS(r){return new Promise((l,h)=>{if(r instanceof gt.getEnv().Canvas||xu(r))return l(null);function d(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",d),g.currentTarget.removeEventListener("error",f),l(g)}function f(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",d),g.currentTarget.removeEventListener("error",f),h(g)}r.addEventListener("load",d),r.addEventListener("error",f)})}function eI(r){return new Promise((l,h)=>{if(!(r instanceof Blob))return h("bufferToImage - expected buf to be of type: Blob");const d=new FileReader;d.onload=()=>{if(typeof d.result!="string")return h("bufferToImage - expected reader.result to be a string, in onload");const f=gt.getEnv().createImageElement();f.onload=()=>l(f),f.onerror=h,f.src=d.result},d.onerror=h,d.readAsDataURL(r)})}function sa(r){const{Image:l,Video:h}=gt.getEnv();return r instanceof l?new ms(r.naturalWidth,r.naturalHeight):r instanceof h?new ms(r.videoWidth,r.videoHeight):new ms(r.width,r.height)}function Oc({width:r,height:l}){const{createCanvasElement:h}=gt.getEnv(),d=h();return d.width=r,d.height=l,d}function Tu(r,l){const{ImageData:h}=gt.getEnv();if(!(r instanceof h)&&!xu(r))throw new Error("createCanvasFromMedia - media has not finished loading yet");const{width:d,height:f}=l||sa(r),g=Oc({width:d,height:f});return r instanceof h?es(g).putImageData(r,0,0):es(g).drawImage(r,0,0,d,f),g}const $m=Ye(Je());async function tI(r,l){const h=l||gt.getEnv().createCanvasElement(),[d,f,g]=r.shape.slice(Os(r)?1:0),S=$m.tidy(()=>r.as3D(d,f,g).toInt());return await $m.browser.toPixels(S,h),S.dispose(),h}function 
Um(r){const{Image:l,Canvas:h,Video:d}=gt.getEnv();return r instanceof l||r instanceof h||r instanceof d}const VX=1e-7,HX=1e-4;class h2{time(r){return ie("time")}read(r){return ie("read")}readSync(r){return ie("readSync")}numDataIds(){return ie("numDataIds")}disposeData(r){return ie("disposeData")}write(r,l,h){return ie("write")}move(r,l,h,d){return ie("move")}memory(){return ie("memory")}floatPrecision(){return ie("floatPrecision")}epsilon(){return this.floatPrecision()===32?VX:HX}batchMatMul(r,l,h,d){return ie("batchMatMul")}fusedBatchMatMul({a:r,b:l,transposeA:h,transposeB:d,bias:f,activation:g,preluActivationWeights:S}){return ie("fusedBatchMatMul")}slice(r,l,h){return ie("slice")}stridedSlice(r,l,h,d){return ie("stridedSlice")}unstack(r,l){return ie("unstack")}reverse(r,l){return ie("reverse")}concat(r,l){return ie("concat")}neg(r){return ie("neg")}add(r,l){return ie("add")}addN(r){return ie("addN")}subtract(r,l){return ie("subtract")}multiply(r,l){return ie("multiply")}realDivide(r,l){return ie("realDivide")}floorDiv(r,l){return ie("floorDiv")}sum(r,l){return ie("sum")}prod(r,l){return ie("prod")}unsortedSegmentSum(r,l,h){return ie("unsortedSegmentSum")}argMin(r,l){return ie("argMin")}argMax(r,l){return ie("argMax")}equal(r,l){return ie("equal")}notEqual(r,l){return ie("notEqual")}less(r,l){return ie("less")}lessEqual(r,l){return ie("lessEqual")}greater(r,l){return ie("greater")}greaterEqual(r,l){return ie("greaterEqual")}logicalNot(r){return ie("logicalNot")}logicalAnd(r,l){return ie("logicalAnd")}logicalOr(r,l){return ie("logicalOr")}where(r){return ie("where")}select(r,l,h){return ie("select")}topk(r,l,h){return ie("topk")}min(r,l){return ie("min")}minimum(r,l){return ie("minimum")}mod(r,l){return ie("mod")}max(r,l){return ie("max")}maximum(r,l){return ie("maximum")}all(r,l){return ie("all")}any(r,l){return ie("any")}squaredDifference(r,l){return ie("squaredDifference")}ceil(r){return ie("ceil")}floor(r){return ie("floor")}round(r){return ie("round")}sign(r){return ie("sign")}isNaN(r){return ie("isNaN")}isInf(r){return ie("isInf")}isFinite(r){return ie("isFinite")}pow(r,l){return ie("pow")}exp(r){return ie("exp")}expm1(r){return ie("expm1")}softmax(r,l){return ie("softmax")}log(r){return ie("log")}log1p(r){return ie("log1p")}sqrt(r){return ie("sqrt")}rsqrt(r){return ie("rsqrt")}square(r){return ie("square")}reciprocal(r){return ie("reciprocal")}relu(r){return ie("relu")}relu6(r){return ie("relu6")}prelu(r,l){return ie("prelu")}elu(r){return ie("elu")}eluDer(r,l){return ie("eluDer")}selu(r){return ie("selu")}int(r){return ie("int")}clip(r,l,h){return ie("clip")}abs(r){return ie("abs")}complexAbs(r){return ie("complexAbs")}sigmoid(r){return ie("sigmoid")}softplus(r){return ie("softplus")}sin(r){return ie("sin")}cos(r){return ie("cos")}tan(r){return ie("tan")}asin(r){return ie("asin")}acos(r){return ie("acos")}atan(r){return ie("atan")}atan2(r,l){return ie("atan2")}sinh(r){return ie("sinh")}cosh(r){return ie("cosh")}tanh(r){return ie("tanh")}asinh(r){return ie("asinh")}acosh(r){return ie("acosh")}atanh(r){return ie("atanh")}erf(r){return ie("erf")}step(r,l){return ie("step")}fusedConv2d({input:r,filter:l,convInfo:h,bias:d,activation:f,preluActivationWeights:g}){return ie("fusedConv2d")}conv2d(r,l,h){return ie("conv2d")}conv2dDerInput(r,l,h){return ie("conv2dDerInput")}conv2dDerFilter(r,l,h){return ie("conv2dDerFilter")}fusedDepthwiseConv2D({input:r,filter:l,convInfo:h,bias:d,activation:f,preluActivationWeights:g}){return ie("fusedDepthwiseConv2D")}depthwiseConv2D(r,l,h){return 
ie("depthwiseConv2D")}depthwiseConv2DDerInput(r,l,h){return ie("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(r,l,h){return ie("depthwiseConv2DDerFilter")}conv3d(r,l,h){return ie("conv3d")}conv3dDerInput(r,l,h){return ie("conv3dDerInput")}conv3dDerFilter(r,l,h){return ie("conv3dDerFilter")}maxPool(r,l){return ie("maxPool")}maxPoolBackprop(r,l,h,d){return ie("maxPoolBackprop")}avgPool(r,l){return ie("avgPool")}avgPoolBackprop(r,l,h){return ie("avgPoolBackprop")}avgPool3d(r,l){return ie("avgPool3d")}avgPool3dBackprop(r,l,h){return ie("avgPool3dBackprop")}maxPool3d(r,l){return ie("maxPool3d")}maxPool3dBackprop(r,l,h,d){return ie("maxPool3dBackprop")}reshape(r,l){return ie("reshape")}cast(r,l){return ie("cast")}tile(r,l){return ie("tile")}pad(r,l,h){return ie("pad")}transpose(r,l){return ie("transpose")}gather(r,l,h){return ie("gather")}gatherND(r,l){return ie("gatherND")}scatterND(r,l,h){return ie("scatterND")}batchToSpaceND(r,l,h){return ie("batchToSpaceND")}spaceToBatchND(r,l,h){return ie("spaceToBatchND")}resizeBilinear(r,l,h,d){return ie("resizeBilinear")}resizeBilinearBackprop(r,l,h){return ie("resizeBilinearBackprop")}resizeNearestNeighbor(r,l,h,d){return ie("resizeNearestNeighbor")}resizeNearestNeighborBackprop(r,l,h){return ie("resizeNearestNeighborBackprop")}batchNorm(r,l,h,d,f,g){return ie("batchNorm")}localResponseNormalization4D(r,l,h,d,f){return ie("localResponseNormalization4D")}LRNGrad(r,l,h,d,f,g,S){return ie("LRNGrad")}multinomial(r,l,h,d){return ie("multinomial")}oneHot(r,l,h,d){return ie("oneHot")}cumsum(r,l,h,d){return ie("cumsum")}nonMaxSuppression(r,l,h,d,f){return ie("nonMaxSuppression")}fft(r){return ie("fft")}ifft(r){return ie("ifft")}complex(r,l){return ie("complex")}real(r){return ie("real")}imag(r){return ie("imag")}cropAndResize(r,l,h,d,f,g){return ie("cropAndResize")}depthToSpace(r,l,h){return ie("depthToSpace")}split(r,l,h){return ie("split")}sparseToDense(r,l,h,d){return ie("sparseToDense")}diag(r){return ie("diag")}fill(r,l,h){return ie("fill")}onesLike(r){return ie("onesLike")}zerosLike(r){return ie("zerosLike")}linspace(r,l,h){return ie("linspace")}dispose(){return ie("dispose")}}function ie(r){throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}const u2="tfjsflags";class d2{constructor(r){this.global=r,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(r,l){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${l}.`),this.platformName=r,this.platform=l}registerFlag(r,l,h){if(this.flagRegistry[r]={evaluationFn:l,setHook:h},this.urlFlags[r]!=null){const d=this.urlFlags[r];console.warn(`Setting feature override from URL ${r}: ${d}.`),this.set(r,d)}}async getAsync(r){return r in this.flags?this.flags[r]:(this.flags[r]=await this.evaluateFlag(r),this.flags[r])}get(r){if(r in this.flags)return this.flags[r];const l=this.evaluateFlag(r);if(l instanceof Promise)throw new Error(`Flag ${r} cannot be synchronously evaluated. 
Please use getAsync() instead.`);return this.flags[r]=l,this.flags[r]}getNumber(r){return this.get(r)}getBool(r){return this.get(r)}getFlags(){return this.flags}get features(){return this.flags}set(r,l){if(this.flagRegistry[r]==null)throw new Error(`Cannot set flag ${r} as it has not been registered.`);this.flags[r]=l,this.flagRegistry[r].setHook!=null&&this.flagRegistry[r].setHook(l)}evaluateFlag(r){if(this.flagRegistry[r]==null)throw new Error(`Cannot evaluate flag '${r}': no evaluation function found.`);return this.flagRegistry[r].evaluationFn()}setFlags(r){this.flags=Object.assign({},r)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const r=YX(this.global.location.search);if(u2 in r){const l=r[u2].split(",");l.forEach(h=>{const[d,f]=h.split(":");this.urlFlags[d]=qX(d,f)})}}}function YX(r){const l={};return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(h,...d)=>(jX(l,d[0],d[1]),d.join("="))),l}function jX(r,l,h){r[decodeURIComponent(l)]=decodeURIComponent(h||"")}function qX(r,l){if(l=l.toLowerCase(),l==="true"||l==="false")return l==="true";if(`${+l}`===l)return+l;throw new Error(`Could not parse value flag value ${l} for flag ${r}.`)}function Es(){return p2}let p2=null;function m2(r){p2=r}let nI;function sI(){if(nI==null){let r;if(typeof window!="undefined")r=window;else if(typeof global!="undefined")r=global;else if(typeof process!="undefined")r=process;else if(typeof self!="undefined")r=self;else throw new Error("Could not find a global object");nI=r}return nI}function KX(){const r=sI();return r._tfGlobals==null&&(r._tfGlobals=new Map),r._tfGlobals}function iI(r,l){const h=KX();if(h.has(r))return h.get(r);{const d=l();return h.set(r,d),h.get(r)}}const 
Bm="Abs",f2="Acos",g2="Acosh",Ec="Add",y2="AddN",b2="ArgMax",w2="ArgMin",L2="Asin",S2="Asinh",I2="Atan",x2="Atanh",T2="Atan2",A2="AvgPool",v2="AvgPoolBackprop",N2="AvgPool3D",C2="AvgPool3DBackprop",Mm="BatchMatMul",Pm="BatchToSpaceND",zm="BroadcastTo",Dc="Cast",R2="Ceil",O2="ClipByValue",E2="Complex",Gm="Concat",Vm="Conv2D",D2="Conv2DBackpropFilter",Hm="Conv2DBackpropInput",k2="Conv3D",F2="Conv3DBackpropFilterV2",_2="Conv3DBackpropInputV2",Ym="Cos",qm="Cosh",jm="Cumsum",W2="CropAndResize",$2="DepthwiseConv2dNative",U2="DepthwiseConv2dNativeBackpropFilter",B2="DepthwiseConv2dNativeBackpropInput",M2="Dilation2D",P2="Dilation2DBackpropInput",z2="Dilation2DBackpropFilter",Km="Div",G2="Elu",V2="EluGrad",H2="Erf",Y2="Equal",Xm="Exp",q2="Expm1",j2="FFT",K2="Fill",X2="FlipLeftRight",Jm="Floor",Zm="FloorDiv",J2="FusedBatchNorm",Qm="GatherV2",Z2="Greater",ef="GreaterEqual",tf="Identity",Q2="IFFT",eR="Imag",tR="IsFinite",nR="IsInf",sR="IsNan",iR="Less",rR="LessEqual",nf="Log",sf="Log1p",oR="LogicalAnd",aR="LogicalNot",cR="LogSoftmax",lR="LRN",hR="LRNBackprop",rf="Max",of="Maximum",uR="MaxPool",dR="MaxPoolBackprop",pR="MaxPool3D",mR="MaxPool3DBackprop",af="Min",cf="Minimum",fR="Mod",lf="Multiply",hf="Negate",gR="NotEqual",yR="NonMaxSuppressionV3",bR="NonMaxSuppressionV4",wR="NonMaxSuppressionV5",LR="OnesLike",SR="OneHot",uf="PadV2",df="Pow",IR="Prelu",xR="Range",TR="Real",AR="Reciprocal",pf="Relu",mf="Reshape",ff="ResizeNearestNeighbor",vR="ResizeNearestNeighborGrad",gf="ResizeBilinear",NR="ResizeBilinearGrad",CR="Relu6",yf="Reverse",RR="Round",bf="Rsqrt",wf="SelectV2",OR="Selu",Lf="Slice",Sf="Sin",If="Sinh",ER="Sign",xf="Sigmoid",DR="Softplus",Tf="Sqrt",Af="Sum",vf="SpaceToBatchND",Nf="SplitV",kR="Softmax",Cf="SquaredDifference",FR="Square",Rf="Sub",_R="Tan",WR="Tanh",Of="Tile",Ef="Transpose",Df="Unpack",kf="UnsortedSegmentSum",Ff="ZerosLike",_f="Step",rI="FromPixels",$R="RotateWithOffset";const UR=iI("kernelRegistry",()=>new Map),oI=iI("gradRegistry",()=>new Map);function Wf(r,l){const h=XX(r,l);return UR.get(h)}function aI(r){return oI.get(r)}function cI(r){const l=UR.entries(),h=[];for(;;){const{done:d,value:f}=l.next();if(d)break;const[g,S]=f,[L]=g.split("_");L===r&&h.push(S)}return h}function BR(r){const{kernelName:l}=r;oI.has(l)&&(Es().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${l}'`)),oI.set(l,r)}function XX(r,l){return`${l}_${r}`}function Z(r,l){if(!r)throw new Error(typeof l=="string"?l:l())}function Zt(r,l,h=""){Z(ia(r,l),()=>h+` Shapes ${r} and ${l} must match`)}function kc(r){Z(r!=null,()=>"The input to the tensor constructor must be a non-null value.")}function Fc(r,l=[],h=!1){if(l==null&&(l=[]),Array.isArray(r)||Ds(r)&&!h)for(let d=0;d=0)h*=r[g];else if(r[g]===-1){if(d!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${d} and dim ${g}`);d=g}else if(r[g]<0)throw Error(`Shapes can not be < 0. Found ${r[g]} at dim ${g}`);if(d===-1){if(l>0&&l!==h)throw Error(`Size(${l}) must match the product of shape ${r}`);return r}if(h===0)throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`);if(l%h!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${l} / ${h}`);const f=r.slice();return f[d]=l/h,f}function ht(r,l){const h=l.length;return r=r==null?l.map((d,f)=>f):[].concat(r),Z(r.every(d=>d>=-h&&d`All values in axis param must be in range [-${h}, ${h}) but got axis ${r}`),Z(r.every(d=>Qt(d)),()=>`All values in axis param must be integers but got axis ${r}`),r.map(d=>d<0?h+d:d)}function PR(r,l){const h=[],d=[],f=l!=null&&Array.isArray(l)&&l.length===0,g=l==null||f?null:ht(l,r).sort();let S=0;for(let L=0;LL)&&r[L]===1&&(h.push(r[L]),d.push(L)),g[S]<=L&&S++}r[L]!==1&&(h.push(r[L]),d.push(L))}return{newShape:h,keptDims:d}}function zR(r,l){let h=null;if(r==null||r==="float32")h=new Float32Array(l);else if(r==="int32")h=new Int32Array(l);else if(r==="bool")h=new Uint8Array(l);else if(r==="string")h=new Array(l);else throw new Error(`Unknown data type ${r}`);return h}function JX(r,l){for(let h=0;hl+=h.length),l}function Au(r){return typeof r=="string"||r instanceof String}function ZX(r){return typeof r=="boolean"}function QX(r){return typeof r=="number"}function vu(r){return Array.isArray(r)?vu(r[0]):r instanceof Float32Array?"float32":r instanceof Int32Array||r instanceof Uint8Array?"int32":QX(r)?"float32":Au(r)?"string":ZX(r)?"bool":"float32"}function lI(r){return!!(r&&r.constructor&&r.call&&r.apply)}function Nu(r){const l=r.length;if(l<2)return[];const h=new Array(l-1);h[l-2]=r[l-1];for(let d=l-3;d>=0;--d)h[d]=h[d+1]*r[d+1];return h}function $f(r,l){if(l==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(r)&&(r=Fc(r)),Es().getBool("DEBUG")&&JX(r,l),eJ(r,l))return r;if(l==null||l==="float32"||l==="complex64")return new Float32Array(r);if(l==="int32")return new Int32Array(r);if(l==="bool"){const h=new Uint8Array(r.length);for(let d=0;dL*x);for(let L=0;Ld*f);if(h===0)return[];if(h!==l.length)throw new Error(`[${r}] does not match the input size ${l.length}.`);return YR(0,r,l)}function eJ(r,l){return r instanceof Float32Array&&l==="float32"||r instanceof Int32Array&&l==="int32"||r instanceof Uint8Array&&l==="bool"}function Uf(r,l){const h=ra(r,l);for(let d=0;d{Z(Number.isInteger(l)&&l>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${r}].`)})}function qR(r,l="utf-8"){return l=l||"utf-8",Es().platform.encode(r,l)}function dI(r,l="utf-8"){return l=l||"utf-8",Es().platform.decode(r,l)}class jR{constructor(r,l){this.backendTimer=r,this.logger=l,l==null&&(this.logger=new nJ)}profileKernel(r,l,h){let d;const f=()=>{d=h()},g=this.backendTimer.time(f);for(let L=0;L{tJ(A,x.dtype,r)})}const S={kernelName:r,outputs:d,inputs:l,timeMs:g.then(L=>L.kernelMs),extraInfo:g.then(L=>L.getExtraProfileInfo!=null?L.getExtraProfileInfo():"")};return S}logKernelProfile(r){const{kernelName:l,outputs:h,timeMs:d,inputs:f,extraInfo:g}=r;h.forEach(S=>{Promise.all([S.data(),d,g]).then(L=>{this.logger.logKernelProfile(l,S,L[0],L[1],f,L[2])})})}}function tJ(r,l,h){if(l!=="float32")return!1;for(let d=0;d0?ne:""} `}}console.log(`%c${L} %c${S} %c${x}D ${O} %c${A} %c${C} %c${g}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function KR(r,l,h){const d={},f={};for(let x=0;xd[te.id]=!0),z=!0,f[A.id]=!0;break}if(z)break}}const g={};g[h.id]=!0;const S={};for(let x=r.length-1;x>=0;x--){const A=r[x],O=A.inputs;for(let C=0;C=0;f--){const g=l[f],S=[];if(g.outputs.forEach(x=>{const A=r[x.id];A!=null?S.push(A):S.push(null)}),g.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${g.kernelName}.`);const L=g.gradient(S);for(const x 
in g.inputs){if(!(x in L))throw new Error(`Cannot backprop through input ${x}. Available gradients found: ${Object.keys(L)}.`);const A=h(()=>L[x]());if(A.dtype!=="float32")throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input ${x} must have 'float32' dtype, but has '${A.dtype}'`);const O=g.inputs[x];if(!ia(A.shape,O.shape))throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input '${x}' has shape '${A.shape}', which does not match the shape of the input '${O.shape}'`);if(r[O.id]==null)r[O.id]=A;else{const C=r[O.id];r[O.id]=d(C,A),C.dispose()}}}}const JR=20,Cu=3,pI=7;function ZR(r,l,h,d){const f=Nu(l),g=sJ(r,l,h,f),S=l.length,L=Mf(r,l,h,f,g),x=["Tensor"];return d&&(x.push(` dtype: ${h}`),x.push(` rank: ${S}`),x.push(` shape: [${l}]`),x.push(" values:")),x.push(L.map(A=>" "+A).join(` +`,J7=Am(X7),Z7={kernelName:va,backendName:"webgl",kernelFunc:J7};const Q7="return x * x;",eX=Am(Q7),tX={kernelName:fd,backendName:"webgl",kernelFunc:eX};const qC="return (a - b) * (a - b);",nX=xS(qC,qC),sX={kernelName:Na,backendName:"webgl",kernelFunc:nX};const iX="return tan(x);",rX=Am(iX),oX={kernelName:Ca,backendName:"webgl",kernelFunc:rX};const aX={kernelName:zl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{perm:i}=t,o=n,a=s.shape.length,c=new Array(a);for(let p=0;p{vc(EX,{isNodejs:()=>DX});function DX(){return typeof global=="object"&&!0&&typeof o2!="undefined"&&typeof process!="undefined"&&!!process.version}});function gr(r,l,h=!1){if(r.beginPath(),l.slice(1).forEach(({x:d,y:f},g)=>{const S=l[g];r.moveTo(S.x,S.y),r.lineTo(d,f)}),h){const d=l[l.length-1],f=l[0];if(!d||!f)return;r.moveTo(d.x,d.y),r.lineTo(f.x,f.y)}r.stroke()}class ms{constructor(r,l){if(!gi(r)||!gi(l))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width:r,height:l})}`);this._width=r,this._height=l}get width(){return this._width}get height(){return this._height}reverse(){return new ms(1/this.width,1/this.height)}}const US={};vc(US,{computeReshapedDimensions:()=>PS,getCenterPoint:()=>ta,isDimensions:()=>Dm,isEven:()=>Em,isFloat:()=>MS,isTensor:()=>Qo,isTensor1D:()=>kX,isTensor2D:()=>BS,isTensor3D:()=>yr,isTensor4D:()=>Os,isValidNumber:()=>gi,isValidProbablitiy:()=>Rc,range:()=>zi,round:()=>ea});const c2=Ye(Je());function Qo(r,l){return r instanceof c2.Tensor&&r.shape.length===l}function kX(r){return Qo(r,1)}function BS(r){return Qo(r,2)}function yr(r){return Qo(r,3)}function Os(r){return Qo(r,4)}function MS(r){return r%1!==0}function Em(r){return r%2===0}function ea(r,l=2){const h=Math.pow(10,l);return Math.floor(r*h)/h}function Dm(r){return r&&r.width&&r.height}function PS({width:r,height:l},h){const d=h/Math.max(l,r);return new ms(Math.round(r*d),Math.round(l*d))}function ta(r){return r.reduce((l,h)=>l.add(h),new Ze(0,0)).div(new Ze(r.length,r.length))}function zi(r,l,h){return Array(r).fill(0).map((d,f)=>l+f*h)}function gi(r){return!!r&&r!==Infinity&&r!==-Infinity&&!isNaN(r)||r===0}function Rc(r){return gi(r)&&0<=r&&r<=1}class Ze{constructor(r,l){this._x=r,this._y=l}get x(){return this._x}get y(){return this._y}add(r){return new Ze(this.x+r.x,this.y+r.y)}sub(r){return new Ze(this.x-r.x,this.y-r.y)}mul(r){return new Ze(this.x*r.x,this.y*r.y)}div(r){return new Ze(this.x/r.x,this.y/r.y)}abs(){return new Ze(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Ze(Math.floor(this.x),Math.floor(this.y))}}class Ct{static 
isRect(r){return!!r&&[r.x,r.y,r.width,r.height].every(gi)}static assertIsValidBox(r,l,h=!1){if(!Ct.isRect(r))throw new Error(`${l} - invalid box: ${JSON.stringify(r)}, expected object with properties x, y, width, height`);if(!h&&(r.width<0||r.height<0))throw new Error(`${l} - width (${r.width}) and height (${r.height}) must be positive numbers`)}constructor(r,l=!0){const h=r||{},d=[h.left,h.top,h.right,h.bottom].every(gi),f=[h.x,h.y,h.width,h.height].every(gi);if(!f&&!d)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(h)}`);const[g,S,L,x]=f?[h.x,h.y,h.width,h.height]:[h.left,h.top,h.right-h.left,h.bottom-h.top];Ct.assertIsValidBox({x:g,y:S,width:L,height:x},"Box.constructor",l),this._x=g,this._y=S,this._width=L,this._height=x}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Ze(this.left,this.top)}get topRight(){return new Ze(this.right,this.top)}get bottomLeft(){return new Ze(this.left,this.bottom)}get bottomRight(){return new Ze(this.right,this.bottom)}round(){const[r,l,h,d]=[this.x,this.y,this.width,this.height].map(f=>Math.round(f));return new Ct({x:r,y:l,width:h,height:d})}floor(){const[r,l,h,d]=[this.x,this.y,this.width,this.height].map(f=>Math.floor(f));return new Ct({x:r,y:l,width:h,height:d})}toSquare(){let{x:r,y:l,width:h,height:d}=this;const f=Math.abs(h-d);return hl&&(S=-O+l+h,O=l),C>r&&(L=-C+r+d,C=r),x<1&&(L=2-x,x=1),A<1&&(L=2-A,A=1),{dy:g,edy:L,dx:f,edx:S,y:A,ey:C,x,ex:O,w:h,h:d}}calibrate(r){return new Ct({left:this.left+r.left*this.width,top:this.top+r.top*this.height,right:this.right+r.right*this.width,bottom:this.bottom+r.bottom*this.height}).toSquare().round()}}class wu extends Ct{constructor(r,l,h,d,f=!1){super({left:r,top:l,right:h,bottom:d},f)}}class Oc{constructor(r,l,h,d,f){this._imageDims=new ms(f.width,f.height),this._score=r,this._classScore=l,this._className=h,this._box=new Ct(d).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Ct(this._box).rescale(this.imageDims.reverse())}forSize(r,l){return new Oc(this.score,this.classScore,this.className,this.relativeBox,{width:r,height:l})}}class Yt extends Oc{constructor(r,l,h){super(r,r,"",l,h)}forSize(r,l){const{score:h,relativeBox:d,imageDims:f}=super.forSize(r,l);return new Yt(h,d,f)}}function zS(r,l,h=!0){const d=Math.max(0,Math.min(r.right,l.right)-Math.max(r.left,l.left)),f=Math.max(0,Math.min(r.bottom,l.bottom)-Math.max(r.top,l.top)),g=d*f;return h?g/(r.area+l.area-g):g/Math.min(r.area,l.area)}function GS(r){const l=r.map(L=>L.x),h=r.map(L=>L.y),d=l.reduce((L,x)=>xxLL({score:S,boxIndex:L})).sort((S,L)=>S.score-L.score).map(S=>S.boxIndex);const g=[];for(;f.length>0;){const S=f.pop();g.push(S);const L=f,x=[];for(let A=0;Ax[O]<=h)}return g}const Gi=Ye(Je());function yi(r,l){return Gi.tidy(()=>{const[h,d,f]=l,g=Gi.fill([...r.shape.slice(0,3),1],h,"float32"),S=Gi.fill([...r.shape.slice(0,3),1],d,"float32"),L=Gi.fill([...r.shape.slice(0,3),1],f,"float32"),x=Gi.concat([g,S,L],3);return Gi.sub(r,x)})}const ro=Ye(Je());function HS(r,l=!1){return 
ro.tidy(()=>{const[h,d]=r.shape.slice(1);if(h===d)return r;const f=Math.abs(h-d),g=Math.round(f*(l?.5:1)),S=h>d?2:1,L=$=>{const z=r.shape.slice();return z[S]=$,ro.fill(z,0,"float32")},x=L(g),A=f-x.shape[S],O=l&&A?L(A):null,C=[O,r,x].filter($=>!!$).map($=>ro.cast($,"float32"));return ro.concat(C,S)})}function FX(r){const l=r.slice();for(let h=l.length-1;h>0;h--){const d=Math.floor(Math.random()*(h+1)),f=l[h];l[h]=l[d],l[d]=f}return l}function Lu(r){return 1/(1+Math.exp(-r))}function _X(r){return Math.log(r/(1-r))}class Su extends Ct{constructor(r,l,h,d,f=!1){super({x:r,y:l,width:h,height:d},f)}}const WX=.5,$X=.43,UX=.45;class qs{constructor(r,l,h=new Ze(0,0)){const{width:d,height:f}=l;this._imgDims=new ms(d,f),this._shift=h,this._positions=r.map(g=>g.mul(new Ze(d,f)).add(h))}get shift(){return new Ze(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(r=>r.sub(this._shift).div(new Ze(this.imageWidth,this.imageHeight)))}forSize(r,l){return new this.constructor(this.relativePositions,{width:r,height:l})}shiftBy(r,l){return new this.constructor(this.relativePositions,this._imgDims,new Ze(r,l))}shiftByPoint(r){return this.shiftBy(r.x,r.y)}align(r,l={}){if(r){const f=r instanceof Yt?r.box.floor():new Ct(r);return this.shiftBy(f.x,f.y).align(null,l)}const{useDlibAlignment:h,minBoxPadding:d}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},l);return h?this.alignDlib():this.alignMinBbox(d)}alignDlib(){const r=this.getRefPointsForAlignment(),[l,h,d]=r,f=O=>d.sub(O).magnitude(),g=(f(l)+f(h))/2,S=Math.floor(g/UX),L=ta(r),x=Math.floor(Math.max(0,L.x-WX*S)),A=Math.floor(Math.max(0,L.y-$X*S));return new Su(x,A,Math.min(S,this.imageWidth+x),Math.min(S,this.imageHeight+A))}alignMinBbox(r){const l=GS(this.positions);return l.pad(l.width*r,l.height*r)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}}class BX extends qs{getRefPointsForAlignment(){const r=this.positions;return[r[0],r[1],ta([r[3],r[4]])]}}class Iu extends qs{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(ta)}}class km{constructor(r,l){this._label=r,this._distance=l}get label(){return this._label}get distance(){return this._distance}toString(r=!0){return`${this.label}${r?` (${ea(this.distance)})`:""}`}}class Fm extends Ct{static assertIsValidLabeledBox(r,l){if(Ct.assertIsValidBox(r,l),!gi(r.label))throw new Error(`${l} - expected property label (${r.label}) to be a number`)}constructor(r,l){super(r);this._label=l}get label(){return this._label}}class na{constructor(r,l){if(!(typeof r=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(l)||l.some(h=>!(h instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=r,this._descriptors=l}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(r=>Array.from(r))}}static 
fromJSON(r){const l=r.descriptors.map(h=>new Float32Array(h));return new na(r.label,l)}}class MX extends Fm{static assertIsValidPredictedBox(r,l){if(Fm.assertIsValidLabeledBox(r,l),!Rc(r.score)||!Rc(r.classScore))throw new Error(`${l} - expected properties score (${r.score}) and (${r.classScore}) to be a number between [0, 1]`)}constructor(r,l,h,d){super(r,l);this._score=h,this._classScore=d}get score(){return this._score}get classScore(){return this._classScore}}function Vi(r){return r.detection instanceof Yt}function sa(r,l){const h={detection:l};return Object.assign({},r,h)}function YS(){const r=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},l=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:r,readFile:l}}function _m(r){let l="";if(!r)try{r=require("fs")}catch(d){l=d.toString()}const h=r?function(d){return new Promise((f,g)=>{r.readFile(d,function(S,L){return S?g(S):f(L)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${l}`)};return{readFile:h}}function qS(){const r=global.Canvas||global.HTMLCanvasElement,l=global.Image||global.HTMLImageElement,h=function(){if(r)return new r;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},d=function(){if(l)return new l;throw new Error("createImageElement - missing Image implementation for nodejs environment")},f=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},g=_m();return{Canvas:r||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:l||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:h,createImageElement:d,fetch:f,...g}}function jS(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}const KS=Ye(a2());let yn;function PX(){if(!yn)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return yn}function XS(r){yn=r}function JS(){if(jS())return XS(YS());if(KS.isNodejs())return XS(qS())}function zX(r){if(yn||JS(),!yn)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");const{Canvas:l=yn.Canvas,Image:h=yn.Image}=r;yn.Canvas=l,yn.Image=h,yn.createCanvasElement=r.createCanvasElement||(()=>new l),yn.createImageElement=r.createImageElement||(()=>new h),yn.ImageData=r.ImageData||yn.ImageData,yn.Video=r.Video||yn.Video,yn.fetch=r.fetch||yn.fetch,yn.readFile=r.readFile||yn.readFile}const gt={getEnv:PX,setEnv:XS,initialize:JS,createBrowserEnv:YS,createFileSystem:_m,createNodejsEnv:qS,monkeyPatch:zX,isBrowser:jS,isNodejs:KS.isNodejs};JS();function ia(r){return!gt.isNodejs()&&typeof r=="string"?document.getElementById(r):r}function es(r){const{Canvas:l,CanvasRenderingContext2D:h}=gt.getEnv();if(r instanceof h)return r;const d=ia(r);if(!(d instanceof l))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");const f=d.getContext("2d");if(!f)throw new Error("resolveContext2d - canvas 2d context is null");return f}var 
Hi;(function(r){r.TOP_LEFT="TOP_LEFT",r.TOP_RIGHT="TOP_RIGHT",r.BOTTOM_LEFT="BOTTOM_LEFT",r.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Hi||(Hi={}));class Wm{constructor(r={}){const{anchorPosition:l,backgroundColor:h,fontColor:d,fontSize:f,fontStyle:g,padding:S}=r;this.anchorPosition=l||Hi.TOP_LEFT,this.backgroundColor=h||"rgba(0, 0, 0, 0.5)",this.fontColor=d||"rgba(255, 255, 255, 1)",this.fontSize=f||14,this.fontStyle=g||"Georgia",this.padding=S||4}}class Ec{constructor(r,l,h={}){this.text=typeof r=="string"?[r]:r instanceof Ec?r.text:r,this.anchor=l,this.options=new Wm(h)}measureWidth(r){const{padding:l}=this.options;return this.text.map(h=>r.measureText(h).width).reduce((h,d)=>h{const z=L+O.x,ne=L+O.y+($+1)*g;h.fillText(C,z,ne)})}}class l2{constructor(r={}){const{boxColor:l,lineWidth:h,label:d,drawLabelOptions:f}=r;this.boxColor=l||"rgba(0, 0, 255, 1)",this.lineWidth=h||2,this.label=d;const g={anchorPosition:Hi.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new Wm(Object.assign({},g,f))}}class ZS{constructor(r,l={}){this.box=new Ct(r),this.options=new l2(l)}draw(r){const l=es(r),{boxColor:h,lineWidth:d}=this.options,{x:f,y:g,width:S,height:L}=this.box;l.strokeStyle=h,l.lineWidth=d,l.strokeRect(f,g,S,L);const{label:x}=this.options;x&&new Ec([x],{x:f-d/2,y:g},this.options.drawLabelOptions).draw(r)}}function GX(r,l){const h=Array.isArray(l)?l:[l];h.forEach(d=>{const f=d instanceof Yt?d.score:Vi(d)?d.detection.score:void 0,g=d instanceof Yt?d.box:Vi(d)?d.detection.box:new Ct(d),S=f?`${ea(f)}`:void 0;new ZS(g,{label:S}).draw(r)})}function xu(r){const{Image:l,Video:h}=gt.getEnv();return r instanceof l&&r.complete||r instanceof h&&r.readyState>=3}function QS(r){return new Promise((l,h)=>{if(r instanceof gt.getEnv().Canvas||xu(r))return l(null);function d(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",d),g.currentTarget.removeEventListener("error",f),l(g)}function f(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",d),g.currentTarget.removeEventListener("error",f),h(g)}r.addEventListener("load",d),r.addEventListener("error",f)})}function eI(r){return new Promise((l,h)=>{if(!(r instanceof Blob))return h("bufferToImage - expected buf to be of type: Blob");const d=new FileReader;d.onload=()=>{if(typeof d.result!="string")return h("bufferToImage - expected reader.result to be a string, in onload");const f=gt.getEnv().createImageElement();f.onload=()=>l(f),f.onerror=h,f.src=d.result},d.onerror=h,d.readAsDataURL(r)})}function ra(r){const{Image:l,Video:h}=gt.getEnv();return r instanceof l?new ms(r.naturalWidth,r.naturalHeight):r instanceof h?new ms(r.videoWidth,r.videoHeight):new ms(r.width,r.height)}function Dc({width:r,height:l}){const{createCanvasElement:h}=gt.getEnv(),d=h();return d.width=r,d.height=l,d}function Tu(r,l){const{ImageData:h}=gt.getEnv();if(!(r instanceof h)&&!xu(r))throw new Error("createCanvasFromMedia - media has not finished loading yet");const{width:d,height:f}=l||ra(r),g=Dc({width:d,height:f});return r instanceof h?es(g).putImageData(r,0,0):es(g).drawImage(r,0,0,d,f),g}const $m=Ye(Je());async function tI(r,l){const h=l||gt.getEnv().createCanvasElement(),[d,f,g]=r.shape.slice(Os(r)?1:0),S=$m.tidy(()=>r.as3D(d,f,g).toInt());return await $m.browser.toPixels(S,h),S.dispose(),h}function Um(r){const{Image:l,Canvas:h,Video:d}=gt.getEnv();return r instanceof l||r instanceof h||r instanceof d}const VX=1e-7,HX=1e-4;class h2{time(r){return ie("time")}read(r){return ie("read")}readSync(r){return ie("readSync")}numDataIds(){return 
ie("numDataIds")}disposeData(r){return ie("disposeData")}write(r,l,h){return ie("write")}move(r,l,h,d){return ie("move")}memory(){return ie("memory")}floatPrecision(){return ie("floatPrecision")}epsilon(){return this.floatPrecision()===32?VX:HX}batchMatMul(r,l,h,d){return ie("batchMatMul")}fusedBatchMatMul({a:r,b:l,transposeA:h,transposeB:d,bias:f,activation:g,preluActivationWeights:S}){return ie("fusedBatchMatMul")}slice(r,l,h){return ie("slice")}stridedSlice(r,l,h,d){return ie("stridedSlice")}unstack(r,l){return ie("unstack")}reverse(r,l){return ie("reverse")}concat(r,l){return ie("concat")}neg(r){return ie("neg")}add(r,l){return ie("add")}addN(r){return ie("addN")}subtract(r,l){return ie("subtract")}multiply(r,l){return ie("multiply")}realDivide(r,l){return ie("realDivide")}floorDiv(r,l){return ie("floorDiv")}sum(r,l){return ie("sum")}prod(r,l){return ie("prod")}unsortedSegmentSum(r,l,h){return ie("unsortedSegmentSum")}argMin(r,l){return ie("argMin")}argMax(r,l){return ie("argMax")}equal(r,l){return ie("equal")}notEqual(r,l){return ie("notEqual")}less(r,l){return ie("less")}lessEqual(r,l){return ie("lessEqual")}greater(r,l){return ie("greater")}greaterEqual(r,l){return ie("greaterEqual")}logicalNot(r){return ie("logicalNot")}logicalAnd(r,l){return ie("logicalAnd")}logicalOr(r,l){return ie("logicalOr")}where(r){return ie("where")}select(r,l,h){return ie("select")}topk(r,l,h){return ie("topk")}min(r,l){return ie("min")}minimum(r,l){return ie("minimum")}mod(r,l){return ie("mod")}max(r,l){return ie("max")}maximum(r,l){return ie("maximum")}all(r,l){return ie("all")}any(r,l){return ie("any")}squaredDifference(r,l){return ie("squaredDifference")}ceil(r){return ie("ceil")}floor(r){return ie("floor")}round(r){return ie("round")}sign(r){return ie("sign")}isNaN(r){return ie("isNaN")}isInf(r){return ie("isInf")}isFinite(r){return ie("isFinite")}pow(r,l){return ie("pow")}exp(r){return ie("exp")}expm1(r){return ie("expm1")}softmax(r,l){return ie("softmax")}log(r){return ie("log")}log1p(r){return ie("log1p")}sqrt(r){return ie("sqrt")}rsqrt(r){return ie("rsqrt")}square(r){return ie("square")}reciprocal(r){return ie("reciprocal")}relu(r){return ie("relu")}relu6(r){return ie("relu6")}prelu(r,l){return ie("prelu")}elu(r){return ie("elu")}eluDer(r,l){return ie("eluDer")}selu(r){return ie("selu")}int(r){return ie("int")}clip(r,l,h){return ie("clip")}abs(r){return ie("abs")}complexAbs(r){return ie("complexAbs")}sigmoid(r){return ie("sigmoid")}softplus(r){return ie("softplus")}sin(r){return ie("sin")}cos(r){return ie("cos")}tan(r){return ie("tan")}asin(r){return ie("asin")}acos(r){return ie("acos")}atan(r){return ie("atan")}atan2(r,l){return ie("atan2")}sinh(r){return ie("sinh")}cosh(r){return ie("cosh")}tanh(r){return ie("tanh")}asinh(r){return ie("asinh")}acosh(r){return ie("acosh")}atanh(r){return ie("atanh")}erf(r){return ie("erf")}step(r,l){return ie("step")}fusedConv2d({input:r,filter:l,convInfo:h,bias:d,activation:f,preluActivationWeights:g}){return ie("fusedConv2d")}conv2d(r,l,h){return ie("conv2d")}conv2dDerInput(r,l,h){return ie("conv2dDerInput")}conv2dDerFilter(r,l,h){return ie("conv2dDerFilter")}fusedDepthwiseConv2D({input:r,filter:l,convInfo:h,bias:d,activation:f,preluActivationWeights:g}){return ie("fusedDepthwiseConv2D")}depthwiseConv2D(r,l,h){return ie("depthwiseConv2D")}depthwiseConv2DDerInput(r,l,h){return ie("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(r,l,h){return ie("depthwiseConv2DDerFilter")}conv3d(r,l,h){return ie("conv3d")}conv3dDerInput(r,l,h){return 
ie("conv3dDerInput")}conv3dDerFilter(r,l,h){return ie("conv3dDerFilter")}maxPool(r,l){return ie("maxPool")}maxPoolBackprop(r,l,h,d){return ie("maxPoolBackprop")}avgPool(r,l){return ie("avgPool")}avgPoolBackprop(r,l,h){return ie("avgPoolBackprop")}avgPool3d(r,l){return ie("avgPool3d")}avgPool3dBackprop(r,l,h){return ie("avgPool3dBackprop")}maxPool3d(r,l){return ie("maxPool3d")}maxPool3dBackprop(r,l,h,d){return ie("maxPool3dBackprop")}reshape(r,l){return ie("reshape")}cast(r,l){return ie("cast")}tile(r,l){return ie("tile")}pad(r,l,h){return ie("pad")}transpose(r,l){return ie("transpose")}gather(r,l,h){return ie("gather")}gatherND(r,l){return ie("gatherND")}scatterND(r,l,h){return ie("scatterND")}batchToSpaceND(r,l,h){return ie("batchToSpaceND")}spaceToBatchND(r,l,h){return ie("spaceToBatchND")}resizeBilinear(r,l,h,d){return ie("resizeBilinear")}resizeBilinearBackprop(r,l,h){return ie("resizeBilinearBackprop")}resizeNearestNeighbor(r,l,h,d){return ie("resizeNearestNeighbor")}resizeNearestNeighborBackprop(r,l,h){return ie("resizeNearestNeighborBackprop")}batchNorm(r,l,h,d,f,g){return ie("batchNorm")}localResponseNormalization4D(r,l,h,d,f){return ie("localResponseNormalization4D")}LRNGrad(r,l,h,d,f,g,S){return ie("LRNGrad")}multinomial(r,l,h,d){return ie("multinomial")}oneHot(r,l,h,d){return ie("oneHot")}cumsum(r,l,h,d){return ie("cumsum")}nonMaxSuppression(r,l,h,d,f){return ie("nonMaxSuppression")}fft(r){return ie("fft")}ifft(r){return ie("ifft")}complex(r,l){return ie("complex")}real(r){return ie("real")}imag(r){return ie("imag")}cropAndResize(r,l,h,d,f,g){return ie("cropAndResize")}depthToSpace(r,l,h){return ie("depthToSpace")}split(r,l,h){return ie("split")}sparseToDense(r,l,h,d){return ie("sparseToDense")}diag(r){return ie("diag")}fill(r,l,h){return ie("fill")}onesLike(r){return ie("onesLike")}zerosLike(r){return ie("zerosLike")}linspace(r,l,h){return ie("linspace")}dispose(){return ie("dispose")}}function ie(r){throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}const u2="tfjsflags";class d2{constructor(r){this.global=r,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(r,l){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${l}.`),this.platformName=r,this.platform=l}registerFlag(r,l,h){if(this.flagRegistry[r]={evaluationFn:l,setHook:h},this.urlFlags[r]!=null){const d=this.urlFlags[r];console.warn(`Setting feature override from URL ${r}: ${d}.`),this.set(r,d)}}async getAsync(r){return r in this.flags?this.flags[r]:(this.flags[r]=await this.evaluateFlag(r),this.flags[r])}get(r){if(r in this.flags)return this.flags[r];const l=this.evaluateFlag(r);if(l instanceof Promise)throw new Error(`Flag ${r} cannot be synchronously evaluated. 
Please use getAsync() instead.`);return this.flags[r]=l,this.flags[r]}getNumber(r){return this.get(r)}getBool(r){return this.get(r)}getFlags(){return this.flags}get features(){return this.flags}set(r,l){if(this.flagRegistry[r]==null)throw new Error(`Cannot set flag ${r} as it has not been registered.`);this.flags[r]=l,this.flagRegistry[r].setHook!=null&&this.flagRegistry[r].setHook(l)}evaluateFlag(r){if(this.flagRegistry[r]==null)throw new Error(`Cannot evaluate flag '${r}': no evaluation function found.`);return this.flagRegistry[r].evaluationFn()}setFlags(r){this.flags=Object.assign({},r)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const r=YX(this.global.location.search);if(u2 in r){const l=r[u2].split(",");l.forEach(h=>{const[d,f]=h.split(":");this.urlFlags[d]=qX(d,f)})}}}function YX(r){const l={};return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(h,...d)=>(jX(l,d[0],d[1]),d.join("="))),l}function jX(r,l,h){r[decodeURIComponent(l)]=decodeURIComponent(h||"")}function qX(r,l){if(l=l.toLowerCase(),l==="true"||l==="false")return l==="true";if(`${+l}`===l)return+l;throw new Error(`Could not parse value flag value ${l} for flag ${r}.`)}function Es(){return p2}let p2=null;function m2(r){p2=r}let nI;function sI(){if(nI==null){let r;if(typeof window!="undefined")r=window;else if(typeof global!="undefined")r=global;else if(typeof process!="undefined")r=process;else if(typeof self!="undefined")r=self;else throw new Error("Could not find a global object");nI=r}return nI}function KX(){const r=sI();return r._tfGlobals==null&&(r._tfGlobals=new Map),r._tfGlobals}function iI(r,l){const h=KX();if(h.has(r))return h.get(r);{const d=l();return h.set(r,d),h.get(r)}}const 
Bm="Abs",f2="Acos",g2="Acosh",kc="Add",y2="AddN",b2="ArgMax",w2="ArgMin",L2="Asin",S2="Asinh",I2="Atan",x2="Atanh",T2="Atan2",A2="AvgPool",v2="AvgPoolBackprop",N2="AvgPool3D",C2="AvgPool3DBackprop",Mm="BatchMatMul",Pm="BatchToSpaceND",zm="BroadcastTo",Fc="Cast",R2="Ceil",O2="ClipByValue",E2="Complex",Gm="Concat",Vm="Conv2D",D2="Conv2DBackpropFilter",Hm="Conv2DBackpropInput",k2="Conv3D",F2="Conv3DBackpropFilterV2",_2="Conv3DBackpropInputV2",Ym="Cos",qm="Cosh",jm="Cumsum",W2="CropAndResize",$2="DepthwiseConv2dNative",U2="DepthwiseConv2dNativeBackpropFilter",B2="DepthwiseConv2dNativeBackpropInput",M2="Dilation2D",P2="Dilation2DBackpropInput",z2="Dilation2DBackpropFilter",Km="Div",G2="Elu",V2="EluGrad",H2="Erf",Y2="Equal",Xm="Exp",q2="Expm1",j2="FFT",K2="Fill",X2="FlipLeftRight",Jm="Floor",Zm="FloorDiv",J2="FusedBatchNorm",Qm="GatherV2",Z2="Greater",ef="GreaterEqual",tf="Identity",Q2="IFFT",eR="Imag",tR="IsFinite",nR="IsInf",sR="IsNan",iR="Less",rR="LessEqual",nf="Log",sf="Log1p",oR="LogicalAnd",aR="LogicalNot",cR="LogSoftmax",lR="LRN",hR="LRNBackprop",rf="Max",of="Maximum",uR="MaxPool",dR="MaxPoolBackprop",pR="MaxPool3D",mR="MaxPool3DBackprop",af="Min",cf="Minimum",fR="Mod",lf="Multiply",hf="Negate",gR="NotEqual",yR="NonMaxSuppressionV3",bR="NonMaxSuppressionV4",wR="NonMaxSuppressionV5",LR="OnesLike",SR="OneHot",uf="PadV2",df="Pow",IR="Prelu",xR="Range",TR="Real",AR="Reciprocal",pf="Relu",mf="Reshape",ff="ResizeNearestNeighbor",vR="ResizeNearestNeighborGrad",gf="ResizeBilinear",NR="ResizeBilinearGrad",CR="Relu6",yf="Reverse",RR="Round",bf="Rsqrt",wf="SelectV2",OR="Selu",Lf="Slice",Sf="Sin",If="Sinh",ER="Sign",xf="Sigmoid",DR="Softplus",Tf="Sqrt",Af="Sum",vf="SpaceToBatchND",Nf="SplitV",kR="Softmax",Cf="SquaredDifference",FR="Square",Rf="Sub",_R="Tan",WR="Tanh",Of="Tile",Ef="Transpose",Df="Unpack",kf="UnsortedSegmentSum",Ff="ZerosLike",_f="Step",rI="FromPixels",$R="RotateWithOffset";const UR=iI("kernelRegistry",()=>new Map),oI=iI("gradRegistry",()=>new Map);function Wf(r,l){const h=XX(r,l);return UR.get(h)}function aI(r){return oI.get(r)}function cI(r){const l=UR.entries(),h=[];for(;;){const{done:d,value:f}=l.next();if(d)break;const[g,S]=f,[L]=g.split("_");L===r&&h.push(S)}return h}function BR(r){const{kernelName:l}=r;oI.has(l)&&(Es().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${l}'`)),oI.set(l,r)}function XX(r,l){return`${l}_${r}`}function Z(r,l){if(!r)throw new Error(typeof l=="string"?l:l())}function Zt(r,l,h=""){Z(oa(r,l),()=>h+` Shapes ${r} and ${l} must match`)}function _c(r){Z(r!=null,()=>"The input to the tensor constructor must be a non-null value.")}function Wc(r,l=[],h=!1){if(l==null&&(l=[]),Array.isArray(r)||Ds(r)&&!h)for(let d=0;d=0)h*=r[g];else if(r[g]===-1){if(d!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${d} and dim ${g}`);d=g}else if(r[g]<0)throw Error(`Shapes can not be < 0. Found ${r[g]} at dim ${g}`);if(d===-1){if(l>0&&l!==h)throw Error(`Size(${l}) must match the product of shape ${r}`);return r}if(h===0)throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`);if(l%h!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${l} / ${h}`);const f=r.slice();return f[d]=l/h,f}function ht(r,l){const h=l.length;return r=r==null?l.map((d,f)=>f):[].concat(r),Z(r.every(d=>d>=-h&&d`All values in axis param must be in range [-${h}, ${h}) but got axis ${r}`),Z(r.every(d=>Qt(d)),()=>`All values in axis param must be integers but got axis ${r}`),r.map(d=>d<0?h+d:d)}function PR(r,l){const h=[],d=[],f=l!=null&&Array.isArray(l)&&l.length===0,g=l==null||f?null:ht(l,r).sort();let S=0;for(let L=0;LL)&&r[L]===1&&(h.push(r[L]),d.push(L)),g[S]<=L&&S++}r[L]!==1&&(h.push(r[L]),d.push(L))}return{newShape:h,keptDims:d}}function zR(r,l){let h=null;if(r==null||r==="float32")h=new Float32Array(l);else if(r==="int32")h=new Int32Array(l);else if(r==="bool")h=new Uint8Array(l);else if(r==="string")h=new Array(l);else throw new Error(`Unknown data type ${r}`);return h}function JX(r,l){for(let h=0;hl+=h.length),l}function Au(r){return typeof r=="string"||r instanceof String}function ZX(r){return typeof r=="boolean"}function QX(r){return typeof r=="number"}function vu(r){return Array.isArray(r)?vu(r[0]):r instanceof Float32Array?"float32":r instanceof Int32Array||r instanceof Uint8Array?"int32":QX(r)?"float32":Au(r)?"string":ZX(r)?"bool":"float32"}function lI(r){return!!(r&&r.constructor&&r.call&&r.apply)}function Nu(r){const l=r.length;if(l<2)return[];const h=new Array(l-1);h[l-2]=r[l-1];for(let d=l-3;d>=0;--d)h[d]=h[d+1]*r[d+1];return h}function $f(r,l){if(l==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(r)&&(r=Wc(r)),Es().getBool("DEBUG")&&JX(r,l),eJ(r,l))return r;if(l==null||l==="float32"||l==="complex64")return new Float32Array(r);if(l==="int32")return new Int32Array(r);if(l==="bool"){const h=new Uint8Array(r.length);for(let d=0;dL*x);for(let L=0;Ld*f);if(h===0)return[];if(h!==l.length)throw new Error(`[${r}] does not match the input size ${l.length}.`);return YR(0,r,l)}function eJ(r,l){return r instanceof Float32Array&&l==="float32"||r instanceof Int32Array&&l==="int32"||r instanceof Uint8Array&&l==="bool"}function Uf(r,l){const h=aa(r,l);for(let d=0;d{Z(Number.isInteger(l)&&l>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${r}].`)})}function qR(r,l="utf-8"){return l=l||"utf-8",Es().platform.encode(r,l)}function dI(r,l="utf-8"){return l=l||"utf-8",Es().platform.decode(r,l)}class jR{constructor(r,l){this.backendTimer=r,this.logger=l,l==null&&(this.logger=new nJ)}profileKernel(r,l,h){let d;const f=()=>{d=h()},g=this.backendTimer.time(f);for(let L=0;L{tJ(A,x.dtype,r)})}const S={kernelName:r,outputs:d,inputs:l,timeMs:g.then(L=>L.kernelMs),extraInfo:g.then(L=>L.getExtraProfileInfo!=null?L.getExtraProfileInfo():"")};return S}logKernelProfile(r){const{kernelName:l,outputs:h,timeMs:d,inputs:f,extraInfo:g}=r;h.forEach(S=>{Promise.all([S.data(),d,g]).then(L=>{this.logger.logKernelProfile(l,S,L[0],L[1],f,L[2])})})}}function tJ(r,l,h){if(l!=="float32")return!1;for(let d=0;d0?ne:""} `}}console.log(`%c${L} %c${S} %c${x}D ${O} %c${A} %c${C} %c${g}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function KR(r,l,h){const d={},f={};for(let x=0;xd[te.id]=!0),z=!0,f[A.id]=!0;break}if(z)break}}const g={};g[h.id]=!0;const S={};for(let x=r.length-1;x>=0;x--){const A=r[x],O=A.inputs;for(let C=0;C=0;f--){const g=l[f],S=[];if(g.outputs.forEach(x=>{const A=r[x.id];A!=null?S.push(A):S.push(null)}),g.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${g.kernelName}.`);const L=g.gradient(S);for(const x 
in g.inputs){if(!(x in L))throw new Error(`Cannot backprop through input ${x}. Available gradients found: ${Object.keys(L)}.`);const A=h(()=>L[x]());if(A.dtype!=="float32")throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input ${x} must have 'float32' dtype, but has '${A.dtype}'`);const O=g.inputs[x];if(!oa(A.shape,O.shape))throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input '${x}' has shape '${A.shape}', which does not match the shape of the input '${O.shape}'`);if(r[O.id]==null)r[O.id]=A;else{const C=r[O.id];r[O.id]=d(C,A),C.dispose()}}}}const JR=20,Cu=3,pI=7;function ZR(r,l,h,d){const f=Nu(l),g=sJ(r,l,h,f),S=l.length,L=Mf(r,l,h,f,g),x=["Tensor"];return d&&(x.push(` dtype: ${h}`),x.push(` rank: ${S}`),x.push(` shape: [${l}]`),x.push(" values:")),x.push(L.map(A=>" "+A).join(` `)),x.join(` -`)}function sJ(r,l,h,d){const f=qt(l),g=d[d.length-1],S=new Array(g).fill(0),L=l.length,x=h==="complex64"?Ou(r):r;if(L>1)for(let A=0;AJR){const se=Cu*S;let fe=Array.from(r.slice(0,se)),de=Array.from(r.slice((L-Cu)*S,L*S));return h==="complex64"&&(fe=Ou(fe),de=Ou(de)),["["+fe.map((Ae,xe)=>Ru(Ae,f[xe],h)).join(", ")+", ..., "+de.map((Ae,xe)=>Ru(Ae,f[L-Cu+xe],h)).join(", ")+"]"]}const te=h==="complex64"?Ou(r):Array.from(r);return["["+te.map((se,fe)=>Ru(se,f[fe],h)).join(", ")+"]"]}const A=l.slice(1),O=d.slice(1),C=d[0]*S,$=[];if(L>JR){for(let te=0;te1)for(let A=0;AJR){const se=Cu*S;let fe=Array.from(r.slice(0,se)),de=Array.from(r.slice((L-Cu)*S,L*S));return h==="complex64"&&(fe=Ou(fe),de=Ou(de)),["["+fe.map((Ae,xe)=>Ru(Ae,f[xe],h)).join(", ")+", ..., "+de.map((Ae,xe)=>Ru(Ae,f[L-Cu+xe],h)).join(", ")+"]"]}const te=h==="complex64"?Ou(r):Array.from(r);return["["+te.map((se,fe)=>Ru(se,f[fe],h)).join(", ")+"]"]}const A=l.slice(1),O=d.slice(1),C=d[0]*S,$=[];if(L>JR){for(let te=0;te`Length of values '${d}' does not match the size inferred by the shape '${this.size}'.`)}if(l==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=h||zR(l,this.size),this.strides=Nu(r)}set(r,...l){l.length===0&&(l=[0]),Z(l.length===this.rank,()=>`The number of provided coordinates (${l.length}) must match the rank (${this.rank})`);const h=this.locToIndex(l);this.values[h]=r}get(...r){r.length===0&&(r=[0]);let l=0;for(const d of r){if(d<0||d>=this.shape[l]){const f=`Requested out of range element at ${r}. Buffer shape=${this.shape}`;throw new Error(f)}l++}let h=r[r.length-1];for(let d=0;ddI(h))}catch(h){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return r}dataSync(){this.throwIfDisposed();const r=Yi().readSync(this.dataId);if(this.dtype==="string")try{return r.map(l=>dI(l))}catch(l){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return r}async bytes(){this.throwIfDisposed();const r=await Yi().read(this.dataId);return this.dtype==="string"?r:new Uint8Array(r.buffer)}dispose(){if(this.isDisposed)return;Yi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(r=!1){return Wc.print(this,r)}clone(){return this.throwIfDisposed(),Wc.clone(this)}toString(r=!1){const l=this.dataSync();return ZR(l,this.shape,this.dtype,r)}cast(r){return this.throwIfDisposed(),Wc.cast(this,r)}variable(r=!0,l,h){return this.throwIfDisposed(),Yi().makeVariable(this,r,l,h)}}Object.defineProperty(Tn,Symbol.hasInstance,{value:r=>!!r&&r.data!=null&&r.dataSync!=null&&r.throwIfDisposed!=null});class Pf extends Tn{constructor(r,l,h,d){super(r.shape,r.dtype,r.dataId,d);this.trainable=l,this.name=h}assign(r){if(r.dtype!==this.dtype)throw new Error(`dtype of the new value (${r.dtype}) and previous value (${this.dtype}) must match`);if(!ia(r.shape,this.shape))throw new Error(`shape of the new value (${r.shape}) and previous value (${this.shape}) must match`);Yi().disposeTensor(this),this.dataId=r.dataId,Yi().incRef(this,null)}dispose(){Yi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Pf,Symbol.hasInstance,{value:r=>r instanceof Tn&&r.assign!=null&&r.assign instanceof Function});var iO;(function(r){r.R0="R0",r.R1="R1",r.R2="R2",r.R3="R3",r.R4="R4",r.R5="R5",r.R6="R6"})(iO||(iO={}));var mI;(function(r){r.float32="float32",r.int32="int32",r.bool="int32",r.complex64="complex64"})(mI||(mI={}));var fI;(function(r){r.float32="float32",r.int32="int32",r.bool="bool",r.complex64="complex64"})(fI||(fI={}));var gI;(function(r){r.float32="float32",r.int32="float32",r.bool="float32",r.complex64="complex64"})(gI||(gI={}));var yI;(function(r){r.float32="complex64",r.int32="complex64",r.bool="complex64",r.complex64="complex64"})(yI||(yI={}));const rJ={float32:gI,int32:mI,bool:fI,complex64:yI};function rO(r,l){if(r==="string"||l==="string"){if(r==="string"&&l==="string")return"string";throw new Error(`Can not upcast ${r} with ${l}`)}return rJ[r][l]}function mt(r,l){if(r.dtype===l.dtype)return[r,l];const h=rO(r.dtype,l.dtype);return[r.cast(h),l.cast(h)]}function zf(r){const l=[],h=new Set;return oO(r,l,h),l}function oO(r,l,h){if(r==null)return;if(r instanceof Tn){l.push(r);return}if(!oJ(r))return;const d=r;for(const f in d){const g=d[f];h.has(g)||(h.add(g),oO(g,l,h))}}function oJ(r){return Array.isArray(r)||typeof r=="object"}class aO{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const r in this.registeredVariables)this.registeredVariables[r].dispose()}}class Eu{constructor(r){this.ENV=r,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new aO}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const r=this.getSortedBackends();for(let l=0;l{l.setupFunc!=null&&l.setupFunc(this.backendInstance)})}disposeRegisteredKernels(r){const l=cI(r);l.forEach(h=>{h.disposeFunc!=null&&h.disposeFunc(this.registry[r])})}initializeBackend(r){const 
l=this.registryFactory[r];if(l==null)throw new Error(`Cannot initialize backend ${r}, no registration found.`);try{const h=l.factory();if(h&&!(h instanceof h2)&&typeof h.then=="function"){const d=++this.pendingBackendInitId,f=h.then(g=>d(dthis.registryFactory[l].priority-this.registryFactory[r].priority)}initializeBackendsAndReturnBest(){const r=this.getSortedBackends();for(let l=0;lthis.startScope(h),()=>this.endScope(d),()=>(d=l(),d instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),d))}scopedRun(r,l,h){r();try{const d=h();return l(),d}catch(d){throw l(),d}}nextTensorId(){return Eu.nextTensorId++}nextVariableId(){return Eu.nextVariableId++}clone(r){const l=this.makeTensorFromDataId(r.dataId,r.shape,r.dtype),h={x:r},d=g=>({x:()=>{const S="float32",L={x:g},x={dtype:S};return H.runKernelFunc(A=>A.cast(g,S),L,null,Dc,x)}}),f=[];return this.addTapeNode(this.state.activeScope.name,h,[l],d,f,{}),l}runKernel(r,l,h,d,f){const g=null,S=null;return this.runKernelFunc(g,l,S,r,h,d,f)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(r,l,h){const d=this.backend.numDataIds();let f=0;h.forEach(L=>{f+=L.dtype==="complex64"?3:1});const g=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],S=d-l-f-g;if(S>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${S} data ids) after running '${r}'`)}runKernelFunc(r,l,h,d,f,g,S){let L,x=[];const A=this.isTapeOn();d==null&&(d=this.state.activeScope!=null?this.state.activeScope.name:"");const O=this.state.numBytes,C=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let $;const z=Wf(d,this.backendName);let ne;if(z!=null)$=()=>{const se=this.backend.numDataIds();ne=z.kernelFunc({inputs:l,attrs:f,backend:this.backend});const fe=Array.isArray(ne)?ne:[ne];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,se,fe);const de=fe.map(({dataId:Ae,shape:xe,dtype:Me})=>this.makeTensorFromDataId(Ae,xe,Me));if(A){let Ae=this.getTensorsForGradient(d,l,de);if(Ae==null){S==null&&(S=[]);const xe=de.filter((Me,Ke)=>S[Ke]);Ae=(g||[]).slice().concat(xe)}x=this.saveTensorsForBackwardMode(Ae)}return de};else{const se=fe=>{if(!A)return;x=fe.map(de=>this.keep(this.clone(de)))};$=()=>{const fe=this.backend.numDataIds();ne=this.tidy(()=>r(this.backend,se));const de=Array.isArray(ne)?ne:[ne];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,fe,de),de}}let te;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?L=$():(te=this.profiler.profileKernel(d,l,()=>$()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(te),L=te.outputs)}),A&&this.addTapeNode(d,l,L,h,x,f),this.state.profiling&&this.state.activeProfile.kernels.push({name:d,bytesAdded:this.state.numBytes-O,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-C,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(l).map(se=>l[se]!=null?l[se].shape:null),outputShapes:L.map(se=>se.shape),kernelTimeMs:te.timeMs,extraInfo:te.extraInfo}),Array.isArray(ne)?L:L[0]}saveTensorsForBackwardMode(r){const l=r.map(h=>this.keep(this.clone(h)));return l}getTensorsForGradient(r,l,h){const d=aI(r);if(d!=null){const f=d.inputsToSave||[],g=d.outputsToSave||[];let S;d.saveAllInputs?(Z(Array.isArray(l),()=>"saveAllInputs is true, expected inputs to be an array."),S=Object.keys(l).map(x=>l[x])):S=f.map(x=>l[x]);const L=h.filter((x,A)=>g[A]);return S.concat(L)}return 
null}makeTensor(r,l,h,d){if(r==null)throw new Error("Values passed to engine.makeTensor() are null");h=h||"float32",d=d||this.backend;let f=r;h==="string"&&Au(r[0])&&(f=r.map(L=>qR(L)));const g=d.write(f,l,h),S=new Tn(l,h,g,this.nextTensorId());if(this.incRef(S,d),h==="string"){const L=this.state.tensorInfo.get(g),x=HR(f);this.state.numBytes+=x-L.bytes,L.bytes=x}return S}makeTensorFromDataId(r,l,h,d){h=h||"float32";const f=new Tn(l,h,r,this.nextTensorId());return this.incRef(f,d),f}makeVariable(r,l=!0,h,d){h=h||this.nextVariableId().toString(),d!=null&&d!==r.dtype&&(r=r.cast(d));const f=new Pf(r,l,h,this.nextTensorId());if(this.state.registeredVariables[f.name]!=null)throw new Error(`Variable with name ${f.name} was already registered`);return this.state.registeredVariables[f.name]=f,this.incRef(f,this.backend),f}incRef(r,l){const h=this.state.tensorInfo.has(r.dataId)?this.state.tensorInfo.get(r.dataId).refCount:0;if(this.state.numTensors++,r.dtype==="string"&&this.state.numStringTensors++,h===0){this.state.numDataBuffers++;let d=0;r.dtype!=="complex64"&&r.dtype!=="string"&&(d=r.size*VR(r.dtype)),this.state.tensorInfo.set(r.dataId,{backend:l||this.backend,dtype:r.dtype,shape:r.shape,bytes:d,refCount:0}),this.state.numBytes+=d}this.state.tensorInfo.get(r.dataId).refCount++,r instanceof Pf||this.track(r)}disposeTensor(r){if(!this.state.tensorInfo.has(r.dataId))return;this.state.numTensors--,r.dtype==="string"&&this.state.numStringTensors--;const l=this.state.tensorInfo.get(r.dataId),h=l.refCount;h<=1?(r.dtype!=="complex64"&&(this.state.numBytes-=l.bytes),this.state.numDataBuffers--,l.backend.disposeData(r.dataId),this.state.tensorInfo.delete(r.dataId)):this.state.tensorInfo.get(r.dataId).refCount--}disposeVariables(){for(const r in this.state.registeredVariables){const l=this.state.registeredVariables[r];this.disposeVariable(l)}}disposeVariable(r){this.disposeTensor(r),this.state.registeredVariables[r.name]!=null&&delete this.state.registeredVariables[r.name]}memory(){const r=this.backend.memory();return r.numTensors=this.state.numTensors,r.numDataBuffers=this.state.numDataBuffers,r.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(r.unreliable=!0,r.reasons==null&&(r.reasons=[]),r.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),r}async profile(r){this.state.profiling=!0;const l=this.state.numBytes,h=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await r(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(d=>d.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-l,this.state.activeProfile.newTensors=this.state.numTensors-h;for(const d of this.state.activeProfile.kernels)d.kernelTimeMs=await d.kernelTimeMs,d.extraInfo=await d.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(r,l,h,d,f,g){const S={id:this.state.nextTapeNodeId++,kernelName:r,inputs:l,outputs:h,saved:f},L=aI(r);L!=null&&(d=L.gradFunc),d!=null&&(S.gradient=x=>(x=x.map((A,O)=>{if(A==null){const C=h[O],$=ra(C.size,C.dtype);return this.makeTensor($,C.shape,C.dtype)}return A}),d(x.length>1?x:x[0],f,g))),this.state.activeTape.push(S)}keep(r){return r.kept=!0,r}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(r){const l={track:[],name:"unnamed 
scope",id:this.state.nextScopeId++};r&&(l.name=r),this.state.scopeStack.push(l),this.state.activeScope=l}endScope(r){const l=zf(r),h=new Set(l.map(f=>f.id));for(let f=0;f{!f.kept&&f.scopeId===d.id&&this.track(f)})}gradients(r,l,h,d=!1){if(Z(l.length>0,()=>"gradients() received an empty list of xs."),h!=null&&h.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${h.dtype}'`);const f=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",r));Z(f instanceof Tn,()=>"The result y returned by f() must be a tensor.");const g=KR(this.state.activeTape,l,f);if(!d&&g.length===0&&l.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const S={};S[f.id]=h==null?aJ(f.shape):h,XR(S,g,x=>this.tidy(x),cJ);const L=l.map(x=>S[x.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(x=>{for(const A of x.saved)A.dispose()}),this.state.activeTape=null),{value:f,grads:L}})}customGrad(r){return Z(lI(r),()=>"The f passed in customGrad(f) must be a function."),(...l)=>{Z(l.every(f=>f instanceof Tn),()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors");let h;const d={};return l.forEach((f,g)=>{d[g]=f}),this.runKernelFunc((f,g)=>(h=r(...l,g),Z(h.value instanceof Tn,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),Z(lI(h.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),h.value),d,(f,g)=>{const S=h.gradFunc(f,g),L=Array.isArray(S)?S:[S];Z(L.length===l.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),Z(L.every(A=>A instanceof Tn),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const x={};return L.forEach((A,O)=>{x[O]=()=>A}),x})}}readSync(r){const l=this.state.tensorInfo.get(r);return l.backend.readSync(r)}read(r){const l=this.state.tensorInfo.get(r);return l.backend.read(r)}async time(r){const l=uI(),h=await this.backend.time(r);return h.wallMs=uI()-l,h}track(r){return this.state.activeScope!=null&&(r.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(r)),r}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new aO;for(const r in this.registry)this.disposeRegisteredKernels(r),this.registry[r].dispose(),delete this.registry[r];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Eu.nextTensorId=0;Eu.nextVariableId=0;function aJ(r){const l=Uf(qt(r),"float32");return H.makeTensor(l,r,"float32")}function bI(){const r=sI();if(r._tfengine==null){const l=new d2(r);r._tfengine=new Eu(l)}return m2(r._tfengine.ENV),tO(()=>r._tfengine),r._tfengine}const H=bI();function cJ(r,l){const h={a:r,b:l};return H.runKernelFunc((d,f)=>{const g=d.add(r,l);return f([r,l]),g},h,null,Ec)}function cO(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}const br=Es();br.registerFlag("DEBUG",()=>!1,r=>{r&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});br.registerFlag("IS_BROWSER",()=>cO());br.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");br.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));br.registerFlag("PROD",()=>!1);br.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>br.getBool("DEBUG"));br.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);br.registerFlag("IS_TEST",()=>!1);function wr(r,l){let h=r;if(Ds(r))return l==="string"?[]:[r.length];if(!Array.isArray(r))return[];const d=[];for(;Array.isArray(h)||Ds(h)&&l!=="string";)d.push(h.length),h=h[0];return Array.isArray(r)&&Es().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&lO(r,d,[]),d}function lO(r,l,h){if(h=h||[],!Array.isArray(r)&&!Ds(r)){Z(l.length===0,()=>`Element arr[${h.join("][")}] is a primitive, but should be an array/TypedArray of ${l[0]} elements`);return}Z(l.length>0,()=>`Element arr[${h.join("][")}] should be a primitive, but is an array of ${r.length} elements`),Z(r.length===l[0],()=>`Element arr[${h.join("][")}] should have ${l[0]} elements, but has ${r.length} elements`);const d=l.slice(1);for(let f=0;f=0&&(f=d),hO(d,f,l,h),r==null||!Ds(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string"){const x=r==null?"null":r.constructor.name;throw new Error(`Argument '${l}' passed to '${h}' must be a Tensor or TensorLike, but got '${x}'`)}const g=wr(r,f);!Ds(r)&&!Array.isArray(r)&&(r=[r]);const S=!0,L=f!=="string"?$f(r,f):Fc(r,[],S);return H.makeTensor(L,g,f)}function Gf(r,l,h,d="numeric"){if(!Array.isArray(r))throw new Error(`Argument ${l} passed to ${h} must be a \`Tensor[]\` or \`TensorLike[]\``);const f=r;return f.map((g,S)=>M(g,`${l}[${S}]`,h),d)}const uO="__op";function G(r){const l=Object.keys(r);if(l.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${l.length} keys.`);let h=l[0];const d=r[h];h.endsWith("_")&&(h=h.substring(0,h.length-1)),h=h+uO;const f=(...g)=>{H.startScope(h);try{const S=d(...g);return S instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),H.endScope(S),S}catch(S){throw H.endScope(null),S}};return Object.defineProperty(f,"name",{value:h,configurable:!0}),f}function lJ(r,l){const h=M(r,"real","complex"),d=M(l,"imag","complex");Zt(h.shape,d.shape,`real and imag shapes, ${h.shape} and ${d.shape}, must match in call to tf.complex().`);const f=S=>S.complex(h,d),g={real:h,imag:d};return H.runKernelFunc(f,g,null,E2)}const qi=G({complex_:lJ});function ji(r,l,h,d){if(d==null&&(d=vu(r)),d==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Ds(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(l!=null){Bf(l);const f=qt(l),g=qt(h);Z(f===g,()=>`Based on the provided shape, [${l}], the tensor should have ${f} values but has ${g}`);for(let S=0;S`Error creating a new Tensor. Inferred shape (${h}) does not match the provided shape (${l}). 
`)}}return!Ds(r)&&!Array.isArray(r)&&(r=[r]),l=l||h,r=d!=="string"?$f(r,d):Fc(r,[],!0),H.makeTensor(r,l,d)}function wI(r,l,h){const d=wr(r,h);return ji(r,l,d,h)}function Du(r,l="float32",h){return l=l||"float32",Bf(r),new eO(r,l,h)}function hJ(r,l){const h=M(r,"x","cast");if(!GR(l))throw new Error(`Failed to cast to unknown dtype ${l}`);if(l==="string"&&h.dtype!=="string"||l!=="string"&&h.dtype==="string")throw new Error("Only strings can be casted to strings");const d={x:h},f={dtype:l};return H.runKernelFunc(g=>g.cast(h,l),d,null,Dc,f)}const Le=G({cast_:hJ});function uJ(r){const l=M(r,"x","clone",null),h=()=>H.makeTensorFromDataId(l.dataId,l.shape,l.dtype),d={x:l};return H.runKernelFunc(h,d,null,tf)}const bi=G({clone_:uJ});function LI(r,l=!1){console.log(r.toString(l))}bI();const dJ={buffer:Du,cast:Le,clone:bi,print:LI};nO(dJ);function pJ(r,l){const h=M(r,"x","reshape",null),d={x:h},f={shape:l},g=(S,L)=>(l=MR(l,h.size),Z(h.size===qt(l),()=>"new shape and old shape must have the same number of elements."),L([h]),S.reshape(h,l));return H.runKernelFunc(g,d,null,mf,f)}const re=G({reshape_:pJ});function mJ(r,l,h=!1,d=!1){let f=M(r,"a","matMul"),g=M(l,"b","matMul");[f,g]=mt(f,g),Z(f.rank>=2&&g.rank>=2&&f.rank===g.rank,()=>`Error in matMul: inputs must have the same rank of at least 2, got ranks ${f.rank} and ${g.rank}.`);const S=h?f.shape[f.rank-2]:f.shape[f.rank-1],L=d?g.shape[g.rank-1]:g.shape[g.rank-2],x=h?f.shape[f.rank-1]:f.shape[f.rank-2],A=d?g.shape[g.rank-2]:g.shape[g.rank-1],O=f.shape.slice(0,-2),C=g.shape.slice(0,-2),$=qt(O),z=qt(C);Z(ia(O,C),()=>`Error in matMul: outer dimensions (${O}) and (${C}) of Tensors with shapes ${f.shape} and ${g.shape} must match.`),Z(S===L,()=>`Error in matMul: inner shapes (${S}) and (${L}) of Tensors with shapes ${f.shape} and ${g.shape} and transposeA=${h} and transposeB=${d} must match.`);const ne=f.shape.slice(0,-2).concat([x,A]),te=h?re(f,[$,S,x]):re(f,[$,x,S]),se=d?re(g,[z,A,L]):re(g,[z,L,A]),fe=(Me,Ke)=>(Ke([te,se]),Me.batchMatMul(te,se,h,d)),de={a:te,b:se},Ae={transposeA:h,transposeB:d},xe=H.runKernelFunc(fe,de,null,Mm,Ae);return re(xe,ne)}const dn=G({matMul_:mJ});function fJ(r,l){const h=M(r,"x","transpose");if(l==null&&(l=h.shape.map((g,S)=>S).reverse()),Z(h.rank===l.length,()=>`Error in transpose: rank of input ${h.rank} must match length of perm ${l}.`),l.forEach(g=>{Z(g>=0&&g`All entries in 'perm' must be between 0 and ${h.rank-1} but got ${l}`)}),h.rank<=1)return h.clone();const d={x:h},f={perm:l};return H.runKernelFunc(g=>g.transpose(h,l),d,null,Ef,f)}const Wt=G({transpose_:fJ});function SI(r,l,h){if(kc(r),l!=null&&l.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const d=wr(r,h);if(d.length!==3&&d.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(d.length===1&&l==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return ji(r,l,d,h)}const II={};Tc(II,{fromPixels:()=>bJ,toPixels:()=>yJ});let $c;function gJ(r,l=3){if(l>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(r==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let h=!1,d=!1,f=!1,g=!1,S=!1;if(r.data instanceof Uint8Array)h=!0;else if(typeof ImageData!="undefined"&&r instanceof ImageData)d=!0;else if(typeof HTMLVideoElement!="undefined"&&r instanceof HTMLVideoElement)f=!0;else if(typeof HTMLImageElement!="undefined"&&r instanceof HTMLImageElement)g=!0;else if(r.getContext!=null)S=!0;else throw new 
Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`);if(f){const z=2;if(f&&r.readyState element.")}const L=Wf(rI,H.backendName);if(L!=null){const z={pixels:r},ne={numChannels:l};return H.runKernel(rI,z,ne)}const[x,A]=f?[r.videoWidth,r.videoHeight]:[r.width,r.height];let O;S?O=r.getContext("2d").getImageData(0,0,x,A).data:d||h?O=r.data:(g||f)&&($c==null&&($c=document.createElement("canvas").getContext("2d")),$c.canvas.width=x,$c.canvas.height=A,$c.drawImage(r,0,0,x,A),O=$c.getImageData(0,0,x,A).data);let C;if(l===4)C=new Int32Array(O);else{const z=x*A;C=new Int32Array(z*l);for(let ne=0;ne4||g===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${g}`);if(h.dtype!=="float32"&&h.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${h.dtype}. Please use float32 or int32 tensors.`);const S=await h.data(),L=h.dtype==="float32"?255:1,x=new Uint8ClampedArray(f*d*4);for(let A=0;A1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${z}.`)}else if(h.dtype==="int32"&&(z<0||z>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${z}.`);g===1?(O[0]=z*L,O[1]=z*L,O[2]=z*L):O[$]=z*L}const C=A*4;x[C+0]=Math.round(O[0]),x[C+1]=Math.round(O[1]),x[C+2]=Math.round(O[2]),x[C+3]=Math.round(O[3])}if(l!=null){l.width=f,l.height=d;const A=l.getContext("2d"),O=new ImageData(x,f,d);A.putImageData(O,0,0)}return h!==r&&h.dispose(),x}const bJ=G({fromPixels_:gJ});function dO(r,l,h){const d=r.shape.length;Z(d===l.length,()=>`Error in slice${d}D: Length of begin ${l} must match the rank of the array (${d}).`),Z(d===h.length,()=>`Error in slice${d}D: Length of size ${h} must match the rank of the array (${d}).`);for(let f=0;f`Error in slice${d}D: begin[${f}] + size[${f}] (${l[f]+h[f]}) would overflow input.shape[${f}] (${r.shape[f]})`)}function Vf(r,l,h){let d;const f=r.shape.length;typeof l=="number"?d=[l,...new Array(f-1).fill(0)]:l.length{Z(S!==-1,()=>"slice() does not support negative begin indexing.")});let g;return h==null?g=new Array(f).fill(-1):typeof h=="number"?g=[h,...new Array(f-1).fill(-1)]:h.lengthS>=0?S:(Z(S===-1,()=>`Negative size values should be exactly -1 but got ${S} for the slice() size at index ${L}.`),r.shape[L]-d[L])),[d,g]}function wJ(r){Es().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(r+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}sO(wJ);function pO(r,l){return H.tidy(r,l)}function mO(r){const l=zf(r);l.forEach(h=>h.dispose())}function LJ(r,l){let h=M(r,"a","add"),d=M(l,"b","add");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.add(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,Ec)}const St=G({add_:LJ});function SJ(r,l){let h=M(r,"a","floorDiv"),d=M(l,"b","floorDiv");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.floorDiv(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,Zm)}const xI=G({floorDiv_:SJ});function IJ(r,l){let h=M(r,"a","div"),d=M(l,"b","div");if([h,d]=mt(h,d),h.dtype==="int32"&&d.dtype==="int32")return xI(h,d);const f=(L,x)=>{const A=L.realDivide(h,d);return x([h,d]),A},g={a:h,b:d},S={};return H.runKernelFunc(f,g,null,Km,S)}const ze=G({div_:IJ});function xJ(r,l){let h=M(r,"a","mul"),d=M(l,"b","mul");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.multiply(h,d);return 
L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,lf)}const ae=G({mul_:xJ});function TJ(r){const l=M(r,"x","abs"),h={x:l};return H.runKernelFunc((d,f)=>(f([l]),l.dtype==="complex64"?d.complexAbs(l):d.abs(l)),h,null,Bm)}const Pn=G({abs_:TJ});function AJ(r,l){for(let h=0;hr[g]);return[h,f]}function ts(r,l){const h=l.map(d=>1);return vJ(r,h,l)}function fs(r,l){if(AJ(r,l))return null;const h=[];for(let d=0;dh.push(d)),h}function Uc(r){return r.map((l,h)=>[h,l]).sort((l,h)=>l[1]-h[1]).map(l=>l[0])}function oo(r,l){const h=[];for(let d=l-r;d`The output # of rows (${L}) must be an integer. Change the stride and/or zero pad parameters`);const x=oa((S-l+2*d)/h+1,f);return Z(Qt(x),()=>`The output # of columns (${x}) must be an integer. Change the stride and/or zero pad parameters`),[L,x]}function OJ(r,l,h,d,f,g){f==null&&(f=gO(r,l,d));const S=r[0],L=r[1],x=r[2],A=oa((S-l+2*f)/d+1,g);Z(Qt(A),()=>`The output # of depths (${A}) must be an integer. Change the stride and/or zero pad parameters`);const O=oa((L-l+2*f)/d+1,g);Z(Qt(O),()=>`The output # of rows (${O}) must be an integer. Change the stride and/or zero pad parameters`);const C=oa((x-l+2*f)/d+1,g);return Z(Qt(C),()=>`The output # of columns (${C}) must be an integer. Change the stride and/or zero pad parameters`),[A,O,C,h]}function gO(r,l,h,d=1){const f=Bc(l,d);return Math.floor((r[0]*(h-1)-h+f)/2)}function Hf(r){return typeof r=="number"?[r,r,r]:r.length===2?[r[0],r[1],1]:r}function TI(r){return typeof r=="number"?[r,r,r]:r}function Bc(r,l){return l<=1?r:r+(r-1)*(l-1)}function NJ(r,l,h,d,f,g,S,L,x){let A,O,C;if(typeof r=="number"){const $=r===0?"VALID":"NUMBER";A={top:r,bottom:r,left:r,right:r,type:$};const z=RJ([l,h],g,d,r,L);O=z[0],C=z[1]}else if(r==="same"){O=Math.ceil(l/d),C=Math.ceil(h/f);const $=Math.max(0,(O-1)*d+g-l),z=Math.max(0,(C-1)*f+S-h),ne=Math.floor($/2),te=$-ne,se=Math.floor(z/2),fe=z-se;A={top:ne,bottom:te,left:se,right:fe,type:"SAME"}}else if(r==="valid")A={top:0,bottom:0,left:0,right:0,type:"VALID"},O=Math.ceil((l-g+1)/d),C=Math.ceil((h-S+1)/f);else if(typeof r=="object"){const $=x==="channelsLast"?r[1][0]:r[2][0],z=x==="channelsLast"?r[1][1]:r[2][1],ne=x==="channelsLast"?r[2][0]:r[3][0],te=x==="channelsLast"?r[2][1]:r[3][1],se=$===0&&z===0&&ne===0&&te===0?"VALID":"EXPLICIT";A={top:$,bottom:z,left:ne,right:te,type:se},O=oa((l-g+$+z)/d+1,L),C=oa((h-S+ne+te)/f+1,L)}else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:A,outHeight:O,outWidth:C}}function CJ(r,l,h,d,f,g,S,L,x,A,O){let C,$,z,ne;if(typeof r=="number"){const te=r===0?"VALID":"NUMBER";C={top:r,bottom:r,left:r,right:r,front:r,back:r,type:te};const se=OJ([l,h,d,1],L,1,f,r,O);$=se[0],z=se[1],ne=se[2]}else if(r==="same"){$=Math.ceil(l/f),z=Math.ceil(h/g),ne=Math.ceil(d/S);const te=($-1)*f+L-l,se=(z-1)*g+x-h,fe=(ne-1)*S+A-d,de=Math.floor(te/2),Ae=te-de,xe=Math.floor(se/2),Me=se-xe,Ke=Math.floor(fe/2),wt=fe-Ke;C={top:xe,bottom:Me,left:Ke,right:wt,front:de,back:Ae,type:"SAME"}}else if(r==="valid")C={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},$=Math.ceil((l-L+1)/f),z=Math.ceil((h-x+1)/g),ne=Math.ceil((d-A+1)/S);else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:C,outDepth:$,outHeight:z,outWidth:ne}}function oa(r,l){if(!l)return r;switch(l){case"round":return Math.round(r);case"ceil":return Math.ceil(r);case"floor":return Math.floor(r);default:throw new Error(`Unknown roundingMode ${l}`)}}function ao(r){const[l,h,d]=Hf(r);return l===1&&h===1&&d===1}function co(r,l){return ao(r)||ao(l)}function 
Mc(r){if(r==="NHWC")return"channelsLast";if(r==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${r}`)}function yO(r,l){const h=r[0].length;r.forEach((f,g)=>{Z(f.length===h,()=>`Error in concat${h}D: rank of tensors[${g}] must be the same as the rank of the rest (${h})`)}),Z(l>=0&&l`Error in concat${h}D: axis must be between 0 and ${h-1}.`);const d=r[0];r.forEach((f,g)=>{for(let S=0;S`Error in concat${h}D: Shape of tensors[${g}] (${f}) does not match the shape of the rest (${d}) along the non-concatenated axis ${g}.`)})}function bO(r,l){const h=r[0].slice();for(let d=1;d=1,()=>"Pass at least one tensor to concat");let h=Gf(r,"tensors","concat");h[0].dtype==="complex64"&&h.forEach(S=>{if(S.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor - with dtype ${S.dtype}. `)});const d=(S,L)=>{const x=ht(l,h[0].shape)[0],A=bO(h.map($=>$.shape),x);if(qt(A)===0)return wI([],A);if(h=h.filter($=>$.size>0),h.length===1)return h[0];const O=h.map($=>$.shape);yO(O,x);const C=S.concat(h,x);return L(h),C},f=h,g={axis:l};return H.runKernelFunc(d,f,null,Gm,g)}const bn=G({concat_:EJ});function DJ(r){const l=M(r,"x","sigmoid"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sigmoid(l);return f([g]),g},h,null,xf)}const AI=G({sigmoid_:DJ});function kJ(r,l,h){const d=M(r,"x","slice");if(d.rank===0)throw new Error("Slicing scalar is not possible");const f=(L,x)=>{const[A,O]=Vf(d,l,h);return dO(d,A,O),x([d]),L.slice(d,A,O)},g={x:d},S={begin:l,size:h};return H.runKernelFunc(f,g,null,Lf,S)}const At=G({slice_:kJ});function FJ(r,l,h){const d=M(r,"x","batchToSpaceND"),f=l.reduce((x,A)=>x*A);Z(d.rank>=1+l.length,()=>`input rank is ${d.rank} but should be > than blockShape.length ${l.length}`),Z(h.length===l.length,()=>`crops.length is ${h.length} but should be equal to blockShape.length ${l.length}`),Z(d.shape[0]%f===0,()=>`input tensor batch is ${d.shape[0]} but is not divisible by the product of the elements of blockShape ${l.join(" * ")} === ${f}`);const g=x=>x.batchToSpaceND(d,l,h),S={x:d},L={blockShape:l,crops:h};return H.runKernelFunc(g,S,null,Pm,L)}const vI=G({batchToSpaceND_:FJ});function _J(r,l){let h=M(r,"broadcastTo","x");const d=h.shape;if(l.some(O=>!(O>0)||O%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${l}].`);if(l.lengthh.rank){const O=h.shape.slice();for(;O.length=0;O--)if(f[O]===l[O])g[O]=1;else if(h.shape[O]!==1)throw new Error(`broadcastTo(): [${d}] cannot be broadcast to [${l}].`);const S=g.map((O,C)=>O>1?C:-1).filter(O=>O>=0);if(S.length===0)return bi(h);const L=O=>O.tile(h,g),x={x:h},A={shape:l,inputShape:f};return H.runKernelFunc(L,x,null,zm,A)}const jf=G({broadcastTo_:_J});function WJ(r,l,h,d,f="NHWC",g=[1,1],S){const L=M(r,"x","conv2d"),x=M(l,"filter","conv2d");let A=L,O=!1;L.rank===3&&(O=!0,A=re(L,[1,L.shape[0],L.shape[1],L.shape[2]])),Z(A.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${A.rank}.`),Z(x.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${x.rank}.`),S!=null&&Z(Qt(d),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${S} but got pad ${d}.`);const C=f==="NHWC"?A.shape[3]:A.shape[1];Z(C===x.shape[2],()=>`Error in conv2d: depth of input (${C}) must match input depth for filter ${x.shape[2]}.`),Z(co(h,g),()=>`Error in conv2D: Either strides or dilations must be 1. 
Got strides ${h} and dilations '${g}'`);const $=(se,fe)=>{const de=Mc(f),Ae=Lr(A.shape,x.shape,h,g,d,S,!1,de),xe=se.conv2d(A,x,Ae);return fe([A,x]),xe},z={x:A,filter:x},ne={strides:h,pad:d,dataFormat:f,dilations:g,dimRoundingMode:S},te=H.runKernelFunc($,z,null,Vm,ne);return O?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const NI=G({conv2d_:WJ});function $J(r,l,h,d,f,g="NHWC",S){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let L=r,x=l,A=!1;l.rank===3&&(A=!0,x=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]),L=[1,r[0],r[1],r[2]]),Z(L.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${L.length}.`),Z(x.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${x.rank}`),Z(h.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${h.rank}`);const O=g==="NHWC"?L[3]:L[1],C=g==="NHWC"?x.shape[3]:x.shape[1];Z(O===h.shape[2],()=>`Error in conv2dDerInput: depth of input (${O}) must match input depth for filter ${h.shape[2]}.`),Z(C===h.shape[3],()=>`Error in conv2dDerInput: depth of output (${C}) must match output depth for filter ${h.shape[3]}.`),S!=null&&Z(Qt(f),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${S} but got pad ${f}.`);const $=(se,fe)=>{const de=1,Ae=Mc(g),xe=Lr(L,h.shape,d,de,f,S,!1,Ae),Me=se.conv2dDerInput(x,h,xe);return fe([x,h]),Me},z={dy:x,filter:h},ne={strides:d,pad:f,dataFormat:g,dimRoundingMode:S,inputShape:L},te=H.runKernelFunc($,z,null,Hm,ne);return A?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const wO=G({conv2DBackpropInput_:$J});function UJ(r,l,h,d,f){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let g=r,S=l,L=!1;l.rank===4&&(L=!0,S=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),g=[1,r[0],r[1],r[2],r[3]]);const x=g[4],A=S.shape[4];Z(g.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${g.length}.`),Z(S.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${S.rank}`),Z(h.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${h.rank}`),Z(x===h.shape[3],()=>`Error in conv3dDerInput: depth of input (${x}) must match input depth for filter ${h.shape[3]}.`),Z(A===h.shape[4],()=>`Error in conv3dDerInput: depth of output (${A}) must match output depth for filter ${h.shape[4]}.`);const O=ne=>{const te=1,se=ku(g,h.shape,d,te,f);return ne.conv3dDerInput(S,h,se)},C={dy:S},$={pad:f},z=H.runKernelFunc(O,C,null,_2,$);return L?re(z,[z.shape[1],z.shape[2],z.shape[3],z.shape[4]]):z}const LO=G({conv3DBackpropInput_:UJ});function BJ(r){const l=M(r,"x","cos"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.cos(l);return f([l]),g},h,null,Ym)}const Fu=G({cos_:BJ});function MJ(r){const l=M(r,"x","cosh"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.cosh(l);return f([l]),g},h,null,qm)}const CI=G({cosh_:MJ});function PJ(r,l=0,h=!1,d=!1){const f=M(r,"x","cumsum"),g=(x,A)=>{const O=fs([l],f.rank);let C=f;O!=null&&(C=Wt(f,O));const $=oo(1,f.rank)[0];let z=x.cumsum(C,$,h,d);if(A([f]),O!=null){const ne=Uc(O);z=Wt(z,ne)}return z},S={x:f},L={axis:l,exclusive:h,reverse:d};return H.runKernelFunc(g,S,null,jm,L)}const RI=G({cumsum_:PJ});function vt(r,l){const h=[];for(let d=0;d1)&&h.unshift(g)}return h}function rt(r,l){const h=[],d=Math.max(r.length,l.length);for(let f=0;fS.equal(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,Y2)}const OI=G({equal_:zJ});function GJ(r,l,h){const 
d=M(l,"a","where"),f=M(h,"b","where"),g=M(r,"condition","where","bool"),S=rt(d.shape,f.shape),L=jf(d,S),x=jf(f,S);g.rank===1&&Z(g.shape[0]===d.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),g.rank!==1&&Zt(g.shape,x.shape,"Error in where: ");const A=(C,$)=>{const z=C.select(g,L,x);return $([g]),z},O={condition:g,t:L,e:x};return H.runKernelFunc(A,O,null,wf)}const zn=G({where_:GJ});function VJ(r){const l=M(r,"x","zerosLike"),h={x:l};return H.runKernelFunc(d=>d.zerosLike(l),h,null,Ff)}const je=G({zerosLike_:VJ});function HJ(r){const l=M(r,"x","exp"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.exp(l);return f([g]),g},h,null,Xm)}const Gn=G({exp_:HJ});function YJ(r,l=0){const h=null,d=M(r,"x","expandDims",h);Z(l<=d.rank,()=>"Axis must be <= rank of the tensor");const f=d.shape.slice();return l<0&&(Z(-(d.rank+1)<=l,()=>`Axis must be in the interval [${-(d.rank+1)}, ${d.rank}]`),l=d.rank+l+1),f.splice(l,0,1),re(d,f)}const ks=G({expandDims_:YJ});function qJ(r,l){const h=null,d=M(r,"x","tile",h);Z(d.rank===l.length,()=>`Error in transpose: rank of input ${d.rank} must match length of reps ${l}.`);const f=(x,A)=>{const O=x.tile(d,l);return A([d]),O},g=[d],S={x:d},L={reps:l};return H.runKernelFunc(f,S,null,Of,L,g)}const aa=G({tile_:qJ});function jJ(r,l,h,d="float32"){l==null&&(l=r);const f=Du([r,l],d),g=r<=l?r:l;for(let L=0;Lf.fill(r,l,h),{},null,K2,d)}function KJ(r){const l=M(r,"x","floor"),h={x:l};return H.runKernelFunc(d=>d.floor(l),h,null,Jm)}const kI=G({floor_:KJ});function SO(r,l,h){const d=r.shape[h],f=[];let g=1,S=1;for(let L=0;L{const O=ht(h,d.shape)[0],C=SO(d,f,O),$=x.gather(d,re(f,[f.size]),O);return A([d,f]),re($,C.outputShape)};return H.runKernelFunc(L,g,null,Qm,S)}const FI=G({gather_:XJ});function JJ(r,l){let h=M(r,"a","greater"),d=M(l,"b","greater");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.greater(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,Z2)}const wi=G({greater_:JJ});function ZJ(r,l){let h=M(r,"a","greaterEqual"),d=M(l,"b","greaterEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.greaterEqual(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,ef)}const Sr=G({greaterEqual_:ZJ});function QJ(r){const l=M(r,"input","imag"),h=f=>f.imag(l),d={input:l};return H.runKernelFunc(h,d,null,eR)}const _u=G({imag_:QJ});function e9(r,l){let h=M(r,"a","maximum"),d=M(l,"b","maximum");[h,d]=mt(h,d),h.dtype==="bool"&&(h=Le(h,"int32"),d=Le(d,"int32")),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.maximum(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,of)}const _I=G({maximum_:e9});function ke(r,l){if((Ds(r)&&l!=="string"||Array.isArray(r))&&l!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(l==="string"&&Ds(r)&&!(r instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const h=[],d=[];return ji(r,h,d,l)}function t9(r,l){let h=M(r,"a","less"),d=M(l,"b","less");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.less(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,iR)}const WI=G({less_:t9});function n9(r,l){let h=M(r,"a","lessEqual"),d=M(l,"b","lessEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.lessEqual(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,rR)}const Ir=G({lessEqual_:n9});function s9(r){const l=M(r,"x","log"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.log(l);return f([l]),g},h,null,nf)}const lo=G({log_:s9});function i9(r){const 
l=M(r,"x","log1p"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.log1p(l);return f([l]),g},h,null,sf)}const $I=G({log1p_:i9});function Kf(r){return H.customGrad(r)}function r9(r){const l=M(r,"x","neg"),h={x:l};return H.runKernelFunc(d=>d.neg(l),h,null,hf)}const yt=G({neg_:r9});function o9(r,l=null,h=!1){const d=M(r,"x","max"),f=(L,x)=>{const A=ht(l,d.shape);let O=A;const C=fs(O,d.rank);let $=d;C!=null&&($=Wt(d,C),O=oo(O.length,$.rank));const z=L.max($,O);C!=null&&$.dispose();let ne=z;if(h){const te=ts(ne.shape,ht(l,d.shape));ne=re(ne,te),z.dispose()}return x([d,ne]),ne},g={x:d},S={reductionIndices:l,keepDims:h};return H.runKernelFunc(f,g,null,rf,S)}const ca=G({max_:o9});function a9(r,l){let h=M(r,"a","sub"),d=M(l,"b","sub");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.subtract(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,Rf)}const Be=G({sub_:a9});function c9(r,l=null,h=!1){let d=M(r,"x","sum");d.dtype==="bool"&&(d=Le(d,"int32"));const f=(L,x)=>{x([d]);const A=ht(l,d.shape),O=fs(A,d.rank);let C=A,$=d;O!=null&&($=Wt(d,O),C=oo(C.length,d.rank));let z=L.sum($,C);if(h){const ne=ts(z.shape,A);z=re(z,ne)}return z},g={x:d},S={axis:l,keepDims:h};return H.runKernelFunc(f,g,null,Af,S)}const Fe=G({sum_:c9});function l9(r,l=null,h=!1){const d=M(r,"x","logSumExp"),f=ht(l,d.shape),g=ca(d,f,!0),S=Be(d,g),L=Gn(S),x=Fe(L,f),A=lo(x),O=St(re(g,A.shape),A);if(h){const C=ts(O.shape,f);return re(O,C)}return O}const UI=G({logSumExp_:l9});function h9(r,l){const h=M(r,"a","logicalAnd","bool"),d=M(l,"b","logicalAnd","bool");rt(h.shape,d.shape);const f={a:h,b:d};return H.runKernelFunc(g=>g.logicalAnd(h,d),f,null,oR)}const la=G({logicalAnd_:h9});function u9(r){const l=M(r,"x","logicalNot","bool"),h={x:l};return H.runKernelFunc(d=>d.logicalNot(l),h,null,aR)}const BI=G({logicalNot_:u9});function Fs(r,l="float32"){if(l==="complex64"){const d=Fs(r,"float32"),f=Fs(r,"float32");return qi(d,f)}const h=ra(qt(r),l);return H.makeTensor(h,r,l)}function Ki(r,l="float32"){if(l==="complex64"){const d=Ki(r,"float32"),f=Fs(r,"float32");return qi(d,f)}const h=Uf(qt(r),l);return H.makeTensor(h,r,l)}function d9(r,l=null,h=!1){const d=M(r,"x","mean"),f=ht(l,d.shape),g=fO(d.shape,f),S=g[1],L=qt(S),x=Kf(A=>{const O=ke(L),C=O.dtype===A.dtype?A:Le(A,O.dtype),$=ze(C,O),z=Fe($,l,h),ne=te=>{const se=A.shape.slice();f.forEach(Ae=>{se[Ae]=1});const fe=re(te,se),de=ze(ae(fe,Ki(A.shape,"float32")),L);return de};return{value:z,gradFunc:ne}});return x(d)}const MI=G({mean_:d9});function p9(r,l=null,h=!1){const d=M(r,"x","min"),f=(L,x)=>{const A=ht(l,d.shape);let O=A;const C=fs(O,d.rank);let $=d;C!=null&&($=Wt(d,C),O=oo(O.length,d.rank));const z=L.min($,O);C!=null&&$.dispose();let ne=z;if(h){const te=ts(ne.shape,A);ne=re(z,te),z.dispose()}return x([d,ne]),ne},g={x:d},S={axis:l,keepDims:h};return H.runKernelFunc(f,g,null,af,S)}const Xf=G({min_:p9});function m9(r,l){let h=M(r,"a","minimum"),d=M(l,"b","minimum");[h,d]=mt(h,d),h.dtype==="bool"&&(h=Le(h,"int32"),d=Le(d,"int32")),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.minimum(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,cf)}const PI=G({minimum_:m9});function f9(r){const l=M(r,"x","square"),h={},d=[l],f=[];return H.runKernelFunc((g,S)=>(S([l]),g.square(l)),{x:l},null,"Square",h,d,f)}const ut=G({square_:f9});function g9(r,l){let h=M(r,"a","notEqual"),d=M(l,"b","notEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.notEqual(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,gR)}const zI=G({notEqual_:g9});function y9(r){const 
l=M(r,"input","real"),h=f=>f.real(l),d={input:l};return H.runKernelFunc(h,d,null,TR)}const Pc=G({real_:y9});function b9(r,l,h=0){const d=M(r,"x","pad");if(d.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const f=(L,x)=>(x([d]),L.pad(d,l,h)),g={paddings:l,constantValue:h},S={x:d};return H.runKernelFunc(f,S,null,uf,g)}const GI=G({pad_:b9});function w9(r,l,h){const d=M(r,"x","spaceToBatchND");Z(d.rank>=1+l.length,()=>`input rank ${d.rank} should be > than [blockShape] ${l.length}`),Z(h.length===l.length,()=>`paddings.shape[0] ${h.length} must be equal to [blockShape] ${l.length}`),Z(d.shape.reduce((L,x,A)=>A>0&&A<=l.length?L&&(x+h[A-1][0]+h[A-1][1])%l[A-1]===0:L,!0),()=>`input spatial dimensions ${d.shape.slice(1)} with paddings ${h.toString()} must be divisible by blockShapes ${l.toString()}`);const f=L=>L.spaceToBatchND(d,l,h),g={x:d},S={blockShape:l,paddings:h};return H.runKernelFunc(f,g,null,vf,S)}const VI=G({spaceToBatchND_:w9});function L9(r,l){let h=M(r,"base","pow"),d=M(l,"exp","pow");[h,d]=mt(h,d);const f={a:h,b:d},g=(S,L)=>{const x=S.pow(h,d);return L([h,d,x]),x};return H.runKernelFunc(g,f,null,df)}const ha=G({pow_:L9});function ho(r,l){kc(r);const h=wr(r,l);if(h.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");const d=null;return ji(r,d,h,l)}function Jf(r,l,h=1,d="float32"){if(h===0)throw new Error("Cannot have a step of zero");const f=()=>{const S=r===l,L=r1;if(S||L||x)return Fs([0],d);const A=Math.abs(Math.ceil((l-r)/h)),O=ra(A,d);l(g([l]),l.dtype==="bool"?Le(l,"int32"):f.relu(l)),d={x:l};return H.runKernelFunc(h,d,null,pf)}const Wu=G({relu_:S9});function I9(r,l){const h=M(r,"x","reverse"),d=S=>{const L=ht(l,h.shape);if(h.rank===0)return bi(h);const x=S.reverse(h,L);return re(x,h.shape)},f={x:h},g={dims:l};return H.runKernelFunc(d,f,null,yf,g)}const zc=G({reverse_:I9});function x9(r){const l=M(r,"x","rsqrt"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.rsqrt(l);return f([l]),g},h,null,bf)}const HI=G({rsqrt_:x9});function T9(r){const l=M(r,"x","sin"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sin(l);return f([l]),g},h,null,Sf)}const YI=G({sin_:T9});function A9(r){const l=M(r,"x","sinh"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sinh(l);return f([l]),g},h,null,If)}const qI=G({sinh_:A9});function v9(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(h=>{const d=r.shape[r.shape.length-1],f=r.size/d,g=r.as2D(f,d),S=h.fft(g);return S.reshape(r.shape)},l,null,j2)}const $u=G({fft_:v9});function N9(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(h=>{const d=r.shape[r.shape.length-1],f=r.size/d,g=re(r,[f,d]),S=h.ifft(g);return re(S,r.shape)},l,null,Q2)}const Gc=G({ifft_:N9});function C9(r){const l=r.shape[r.shape.length-1],h=r.size/l;let d;if(l<=2){const f=re(r,[h,l]);d=Gc(f)}else{const f=[h,2*(l-1)],g=re(Pc(r),[h,l]),S=re(_u(r),[h,l]),L=zc(At(g,[0,1],[h,l-2]),1),x=ae(zc(At(S,[0,1],[h,l-2]),1),ke(-1)),A=bn([g,L],1),O=bn([S,x],1),C=re(qi(A,O),[f[0],f[1]]);d=Gc(C)}if(d=Pc(d),r.rank===3&&r.shape[0]!==0){const f=d,g=r.shape[0];d=re(d,[g,d.shape[0]/g,d.shape[1]]),f.dispose()}return d}const jI=G({irfft_:C9});function IO(r,l,h=0){let d=[];if(typeof l=="number")Z(r.shape[h]%l===0,()=>"Number of splits must evenly divide the axis."),d=new Array(l).fill(r.shape[h]/l);else{const 
f=l.reduce((S,L)=>(L===-1&&(S+=1),S),0);Z(f<=1,()=>"There should be only one negative value in split array.");const g=l.indexOf(-1);if(g!==-1){const S=l.reduce((L,x)=>x>0?L+x:L);l[g]=r.shape[h]-S}Z(r.shape[h]===l.reduce((S,L)=>S+L),()=>"The sum of sizes must match the size of the axis dimension."),d=l}return d}function R9(r,l,h=0){const d=M(r,"x","split"),f=(L,x)=>{const A=ht(h,d.shape)[0],O=IO(d,l,A);return L.split(d,O,A)},g={x:d},S={numOrSizeSplits:l,axis:h};return H.runKernelFunc(f,g,null,Nf,S)}const uo=G({split_:R9});function O9(r,l){Z(r.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${r.dtype}`);let h=r.shape[r.shape.length-1];const d=r.size/h;let f;if(l!=null&&l0),te=r.shape.map(se=>se);te[r.shape.length-1]=l,f=At(r,ne,te),h=l}else if(l!=null&&l>h){const ne=r.shape.map(te=>te);ne[r.shape.length-1]=l-h,f=bn([r,Fs(ne)],r.shape.length-1),h=l}else f=r;const g=je(f),S=re(qi(f,g),[d,h]),L=$u(S),x=Math.floor(h/2)+1,A=Pc(L),O=_u(L),C=uo(A,[x,h-x],A.shape.length-1),$=uo(O,[x,h-x],O.shape.length-1),z=f.shape.slice();return z[f.shape.length-1]=x,re(qi(C[0],$[0]),z)}const Uu=G({rfft_:O9});function E9(r){const l=M(r,"x","sqrt"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sqrt(l);return f([l]),g},h,null,Tf)}const gs=G({sqrt_:E9});function D9(r,l){let h=M(r,"a","squaredDifference"),d=M(l,"b","squaredDifference");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(L,x)=>{const A=L.squaredDifference(h,d);return x([h,d]),A},g={a:h,b:d},S={};return H.runKernelFunc(f,g,null,Cf,S)}const KI=G({squaredDifference_:D9});function k9(r,l){const h=M(r,"x","squeeze");return re(h,PR(h.shape,l).newShape)}const XI=G({squeeze_:k9});function F9(r,l=0){const h=Gf(r,"tensors","stack");if(Z(h.length>=1,()=>"Pass at least one tensor to tf.stack"),h.length===1)return ks(h[0],l);const d=h[0].rank,f=h[0].shape,g=h[0].dtype;Z(l<=d,()=>"Axis must be <= rank of the tensor"),h.forEach(L=>{Zt(f,L.shape,"All tensors passed to stack must have matching shapes"),Z(g===L.dtype,()=>"All tensors passed to stack must have matching dtypes")});const S=h.map(L=>ks(L,l));return bn(S,l)}const js=G({stack_:F9});function _9(r,l=0){const h=M(r,"x","step"),d={x:h},f={alpha:l};return H.runKernelFunc(g=>g.step(h,l),d,null,_f,f)}const ua=G({step_:_9});function da(r,l,h){if(kc(r),l!=null&&l.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const d=wr(r,h);if(d.length!==2&&d.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(d.length===1&&l==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return ji(r,l,d,h)}function W9(r,l,h){const d=M(r,"x","unsortedSegmentSum"),f=M(l,"segmentIds","unsortedSegmentSum","int32");Z(Qt(h),()=>"numSegments must be of dtype int");const g={x:d,segmentIds:f},S={numSegments:h},L=(x,A)=>{const O=x.unsortedSegmentSum(d,f,h);return A([f]),O};return H.runKernelFunc(L,g,null,kf,S)}const JI=G({unsortedSegmentSum_:W9});function $9(r,l=0){const h=M(r,"x","unstack");Z(l>=-h.shape.length&&l`Axis = ${l} is not in [-${h.shape.length}, ${h.shape.length})`),l<0&&(l+=h.shape.length);const d={value:h},f={axis:l},g=S=>S.unstack(h,l);return H.runKernelFunc(g,d,null,Df,f)}const Bu=G({unstack_:$9});function U9(r,l="euclidean",h=null,d=!1){r=M(r,"x","norm");const f=xO(r,l,h);let g=f.shape;if(d){const S=ht(h,r.shape);g=ts(f.shape,S)}return re(f,g)}function xO(r,l,h=null){if(r.rank===0)return Pn(r);if(r.rank!==1&&h===null)return xO(re(r,[-1]),l,h);if(r.rank===1||typeof 
h=="number"||Array.isArray(h)&&h.length===1){if(l===1)return Fe(Pn(r),h);if(l===Infinity)return ca(Pn(r),h);if(l===-Infinity)return Xf(Pn(r),h);if(l==="euclidean"||l===2)return gs(Fe(ha(Pn(r),ke(2,"int32")),h));throw new Error(`Error in norm: invalid ord value: ${l}`)}if(Array.isArray(h)&&h.length===2){if(l===1)return ca(Fe(Pn(r),h[0]),h[1]-1);if(l===Infinity)return ca(Fe(Pn(r),h[1]),h[0]);if(l===-Infinity)return Xf(Fe(Pn(r),h[1]),h[0]);if(l==="fro"||l==="euclidean")return gs(Fe(ut(r),h));throw new Error(`Error in norm: invalid ord value: ${l}`)}throw new Error(`Error in norm: invalid axis: ${h}`)}const Zf=G({norm_:U9});function TO(r){return Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2))))}function Qf(r,l,h){const d=1-r%2,f=new Float32Array(r);for(let g=0;g`Error in conv2dDerFilter: input must be rank 4, but got shape ${L.shape}.`),Z(x.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${x.shape}.`),Z(h.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${h}.`);const A=g==="NHWC"?L.shape[3]:L.shape[1],O=g==="NHWC"?x.shape[3]:x.shape[1];Z(A===h[2],()=>`Error in conv2dDerFilter: depth of input ${A}) must match input depth in filter (${h[2]}.`),Z(O===h[3],()=>`Error in conv2dDerFilter: depth of dy (${O}) must match output depth for filter (${h[3]}).`),S!=null&&Z(Qt(f),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${S} but got pad ${f}.`);const C=ne=>{const te=1,se=Mc(g),fe=Lr(L.shape,h,d,te,f,S,!1,se);return ne.conv2dDerFilter(L,x,fe)},$={x:L,dy:x},z={strides:d,pad:f,dataFormat:g,dimRoundingMode:S};return H.runKernelFunc(C,$,null,D2,z)}const eg=G({conv2DBackpropFilter_:B9});function M9(r,l,h,d){let f=r;r.rank===3&&(f=re(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let g=l;g.rank===3&&(g=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const S=x=>x.depthwiseConv2DDerFilter(f,g,d),L={x:f,dy:g};return H.runKernelFunc(S,L,null,U2)}const AO=G({depthwiseConv2dNativeBackpropFilter_:M9});function P9(r,l,h,d){let f=l,g=!1;l.rank===3&&(g=!0,f=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const S=A=>A.depthwiseConv2DDerInput(f,h,d),L={dy:f},x=H.runKernelFunc(S,L,null,B2);return g?re(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const vO=G({depthwiseConv2dNativeBackpropInput_:P9});function z9(r){return Qf(r,.54,.46)}const NO=G({hammingWindow_:z9});function G9(r){return Qf(r,.5,.5)}const tg=G({hannWindow_:G9});function V9(r,l,h,d=!1,f=0){let g=0;const S=[];for(;g+l<=r.size;)S.push(At(r,g,l)),g+=h;if(d)for(;g`Error in cropAndResize: image must be rank 4,but got rank ${S.rank}.`),Z(L.rank===2&&L.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${A},4] but had shape ${L.shape}.`),Z(x.rank===1&&x.shape[0]===A,()=>`Error in cropAndResize: boxInd must be have size [${A}] but had shape ${L.shape}.`),Z(d.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${d.length}.`),Z(d[0]>=1&&d[1]>=1,()=>`cropSize must be atleast [1,1], but was ${d}`),Z(f==="bilinear"||f==="nearest",()=>`method must be bilinear or nearest, but was ${f}`);const O=ne=>ne.cropAndResize(S,L,x,d,f,g),C={image:S,boxes:L,boxInd:x},$={method:f,extrapolationValue:g,cropSize:d},z=H.runKernelFunc(O,C,null,W2,$);return z}const RO=G({cropAndResize_:Y9});function q9(r){const l=M(r,"image","flipLeftRight","float32");Z(l.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${l.rank}.`);const h={image:l},d=H.runKernel(X2,h,{});return d}const OO=G({flipLeftRight_:q9});function j9(r,l,h=0,d=.5){const 
f=M(r,"image","rotateWithOffset","float32");Z(f.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${f.rank}.`);const g={image:f},S={radians:l,fillValue:h,center:d},L=H.runKernel($R,g,S);return L}const EO=G({rotateWithOffset_:j9});function Ks(r,l,h,d,f,g){d==null&&(d=.5),f==null&&(f=Number.NEGATIVE_INFINITY),g==null&&(g=0);const S=r.shape[0];return h=Math.min(h,S),Z(0<=d&&d<=1,()=>`iouThreshold must be in [0, 1], but was '${d}'`),Z(r.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${r.rank}'`),Z(r.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`),Z(l.rank===1,()=>"scores must be a 1D tensor"),Z(l.shape[0]===S,()=>`scores has incompatible shape with boxes. Expected ${S}, but was ${l.shape[0]}`),Z(0<=g&&g<=1,()=>`softNmsSigma must be in [0, 1], but was '${g}'`),{maxOutputSize:h,iouThreshold:d,scoreThreshold:f,softNmsSigma:g}}function K9(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),L=Ks(g,S,h,d,f);h=L.maxOutputSize,d=L.iouThreshold,f=L.scoreThreshold;const x={maxOutputSize:h,iouThreshold:d,scoreThreshold:f};return H.runKernelFunc(A=>A.nonMaxSuppression(g,S,h,d,f),{boxes:g,scores:S},null,yR,x)}const DO=G({nonMaxSuppression_:K9});function kO(r,l,h){const d=X9(r,l,h),f=d<0?-(d+1):d;r.splice(f,0,l)}function X9(r,l,h){return Z9(r,l,h||J9)}function J9(r,l){return r>l?1:r>>1);const L=h(l,r[g]);L>0?d=g+1:(f=g,S=!L)}return S?d:-d-1}function FO(r,l,h,d,f){return ZI(r,l,h,d,f,0).selectedIndices}function _O(r,l,h,d,f,g){return ZI(r,l,h,d,f,0,!1,g,!0)}function WO(r,l,h,d,f,g){return ZI(r,l,h,d,f,g,!0)}function ZI(r,l,h,d,f,g,S=!1,L=!1,x=!1){const A=[];for(let se=0;sef&&A.push({score:l[se],boxIndex:se,suppressBeginIndex:0});A.sort($O);const O=g>0?-.5/g:0,C=[],$=[];for(;C.length0;){const se=A.pop(),{score:fe,boxIndex:de,suppressBeginIndex:Ae}=se;if(fe=Ae;--Me){const Ke=Q9(r,de,C[Me]);if(Ke>=d){xe=!0;break}if(se.score=se.score*eZ(d,O,Ke),se.score<=f)break}se.suppressBeginIndex=C.length,xe||(se.score===fe?(C.push(de),$.push(se.score)):se.score>f&&kO(A,se,$O))}const z=C.length,ne=h-z;L&&ne>0&&(C.push(...new Array(ne).fill(0)),$.push(...new Array(ne).fill(0)));const te={selectedIndices:ho(C,"int32")};return S&&(te.selectedScores=ho($,"float32")),x&&(te.validOutputs=ke(z,"int32")),te}function Q9(r,l,h){const d=r.subarray(l*4,l*4+4),f=r.subarray(h*4,h*4+4),g=Math.min(d[0],d[2]),S=Math.min(d[1],d[3]),L=Math.max(d[0],d[2]),x=Math.max(d[1],d[3]),A=Math.min(f[0],f[2]),O=Math.min(f[1],f[3]),C=Math.max(f[0],f[2]),$=Math.max(f[1],f[3]),z=(L-g)*(x-S),ne=(C-A)*($-O);if(z<=0||ne<=0)return 0;const te=Math.max(g,A),se=Math.max(S,O),fe=Math.min(L,C),de=Math.min(x,$),Ae=Math.max(fe-te,0)*Math.max(de-se,0);return Ae/(z+ne-Ae)}function eZ(r,l,h){const d=Math.exp(l*h*h);return h<=r?d:0}function $O(r,l){return r.score-l.score||r.score===l.score&&l.boxIndex-r.boxIndex}async function tZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),L=Ks(g,S,h,d,f);h=L.maxOutputSize,d=L.iouThreshold,f=L.scoreThreshold;const x=await Promise.all([g.data(),S.data()]),A=x[0],O=x[1],C=FO(A,O,h,d,f);return g!==r&&g.dispose(),S!==l&&S.dispose(),C}const UO=tZ;function nZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=0){const S=M(r,"boxes","nonMaxSuppression"),L=M(l,"scores","nonMaxSuppression"),x=Ks(S,L,h,d,f,g);h=x.maxOutputSize,d=x.iouThreshold,f=x.scoreThreshold,g=x.softNmsSigma;const 
A={boxes:S,scores:L},O={maxOutputSize:h,iouThreshold:d,scoreThreshold:f,softNmsSigma:g},C=H.runKernel(wR,A,O);return{selectedIndices:C[0],selectedScores:C[1]}}const BO=G({nonMaxSuppressionWithScore_:nZ});async function sZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=0){const S=M(r,"boxes","nonMaxSuppressionAsync"),L=M(l,"scores","nonMaxSuppressionAsync"),x=Ks(S,L,h,d,f,g);h=x.maxOutputSize,d=x.iouThreshold,f=x.scoreThreshold,g=x.softNmsSigma;const A=await Promise.all([S.data(),L.data()]),O=A[0],C=A[1],$=WO(O,C,h,d,f,g);return S!==r&&S.dispose(),L!==l&&L.dispose(),$}const MO=sZ;function iZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=!1){const S=M(r,"boxes","nonMaxSuppression"),L=M(l,"scores","nonMaxSuppression"),x=Ks(S,L,h,d,f,null),A=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,$={boxes:S,scores:L},z={maxOutputSize:A,iouThreshold:O,scoreThreshold:C,padToMaxOutputSize:g},ne=H.runKernel(bR,$,z);return{selectedIndices:ne[0],validOutputs:ne[1]}}const PO=G({nonMaxSuppressionPadded_:iZ});async function rZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=!1){const S=M(r,"boxes","nonMaxSuppressionAsync"),L=M(l,"scores","nonMaxSuppressionAsync"),x=Ks(S,L,h,d,f,null),A=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,[$,z]=await Promise.all([S.data(),L.data()]),ne=_O($,z,A,O,C,g);return S!==r&&S.dispose(),L!==l&&L.dispose(),ne}const zO=rZ;function oZ(r,l,h=!1){const d=M(r,"images","resizeBilinear");Z(d.rank===3||d.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${d.rank}.`),Z(l.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${l}.`);let f=d,g=!1;d.rank===3&&(g=!0,f=re(d,[1,d.shape[0],d.shape[1],d.shape[2]]));const[S,L]=l,x=($,z)=>(z([f]),$.resizeBilinear(f,S,L,h)),A={images:f},O={alignCorners:h,size:l},C=H.runKernelFunc(x,A,null,gf,O);return g?re(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const GO=G({resizeBilinear_:oZ});function aZ(r,l,h=!1){const d=M(r,"images","resizeNearestNeighbor");Z(d.rank===3||d.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${d.rank}.`),Z(l.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${l}.`),Z(d.dtype==="float32"||d.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let f=d,g=!1;d.rank===3&&(g=!0,f=re(d,[1,d.shape[0],d.shape[1],d.shape[2]]));const[S,L]=l,x={images:f},A={alignCorners:h,size:l},O=($,z)=>(z([f]),$.resizeNearestNeighbor(f,S,L,h)),C=H.runKernelFunc(O,x,null,ff,A);return g?re(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const VO=G({resizeNearestNeighbor_:aZ});function cZ(r,l,h){Z(l%1===0,()=>`bandPart(): numLower must be an integer, got ${l}.`),Z(h%1===0,()=>`bandPart(): numUpper must be an integer, got ${h}.`);const d=M(r,"a","bandPart");Z(d.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${d.rank}.`);const f=d.shape,[g,S]=d.shape.slice(-2);if(!(l<=g))throw new Error(`bandPart(): numLower (${l}) must not be greater than the number of rows (${g}).`);if(!(h<=S))throw new Error(`bandPart(): numUpper (${h}) must not be greater than the number of columns (${S}).`);l<0&&(l=g),h<0&&(h=S);const L=re(Jf(0,g,1,"int32"),[-1,1]),x=Jf(0,S,1,"int32"),A=Be(L,x),O=la(Ir(A,ke(+l,"int32")),Sr(A,ke(-h,"int32"))),C=Fs([g,S],d.dtype);return re(js(Bu(re(d,[-1,g,S])).map($=>zn(O,$,C))),f)}const HO=G({bandPart_:cZ});function lZ(r){let l;if(Array.isArray(r)){l=!1,Z(r!=null&&r.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const f=r[0].shape[0];for(let g=1;g`Gram-Schmidt: Non-unique lengths found in the input vectors: 
(${r[g].shape[0]} vs. ${f})`)}else l=!0,r=uo(r,r.shape[0],0).map(f=>XI(f,[0]));Z(r.length<=r[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);const h=[],d=r;for(let f=0;f{let g=d[f];if(f>0)for(let S=0;S=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`),r.rank===2)return qO(r,l);{const h=r.shape.slice(0,r.shape.length-2).reduce((x,A)=>x*A),d=Bu(re(r,[h,r.shape[r.shape.length-2],r.shape[r.shape.length-1]]),0),f=[],g=[];d.forEach(x=>{const[A,O]=qO(x,l);f.push(A),g.push(O)});const S=re(js(f,0),r.shape),L=re(js(g,0),r.shape);return[S,L]}}function qO(r,l=!1){return H.tidy(()=>{Z(r.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);const h=r.shape[0],d=r.shape[1];let f=EI(h),g=bi(r);const S=da([[1]],[1,1]);let L=bi(S);const x=h>=d?d:h;for(let A=0;A{const z=At(g,[A,A],[h-A,1]),ne=Zf(z),te=At(g,[A,A],[1,1]),se=zn(wi(te,0),da([[-1]]),da([[1]])),fe=Be(te,ae(se,ne)),de=ze(z,fe);de.shape[0]===1?L=bi(S):L=bn([S,At(de,[1,0],[de.shape[0]-1,de.shape[1]])],0);const Ae=yt(ze(dn(se,fe),ne)),xe=At(g,[A,0],[h-A,d]),Me=ae(Ae,L),Ke=Wt(L);if(A===0)g=Be(xe,dn(Me,dn(Ke,xe)));else{const Kt=Be(xe,dn(Me,dn(Ke,xe)));g=bn([At(g,[0,0],[A,d]),Kt],0)}const wt=Wt(Me),$t=At(f,[0,A],[h,f.shape[1]-A]);if(A===0)f=Be($t,dn(dn($t,L),wt));else{const Kt=Be($t,dn(dn($t,L),wt));f=bn([At(f,[0,0],[h,A]),Kt],1)}return[L,g,f]}),mO([O,C,$])}return!l&&h>d&&(f=At(f,[0,0],[h,d]),g=At(g,[0,0],[d,d])),[f,g]})}const jO=G({qr_:hZ});var jt;(function(r){r[r.NONE=0]="NONE",r[r.MEAN=1]="MEAN",r[r.SUM=2]="SUM",r[r.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(jt||(jt={}));function uZ(r,l,h=jt.SUM_BY_NONZERO_WEIGHTS){const d=M(r,"losses","computeWeightedLoss");let f=null;l!=null&&(f=M(l,"weights","computeWeightedLoss"));const g=f==null?d:ae(d,f);if(h===jt.NONE)return g;if(h===jt.SUM)return Fe(g);if(h===jt.MEAN){if(f==null)return MI(g);{const S=d.size/f.size,L=ze(Fe(g),Fe(f));return S>1?ze(L,ke(S)):L}}if(h===jt.SUM_BY_NONZERO_WEIGHTS){if(f==null)return ze(Fe(g),ke(d.size));{const S=ae(f,Ki(d.shape)),L=Le(Fe(zI(S,ke(0))),"float32");return ze(Fe(g),L)}}throw Error(`Unknown reduction: ${h}`)}const An=G({computeWeightedLoss_:uZ});function dZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","absoluteDifference"),g=M(l,"predictions","absoluteDifference");let S=null;h!=null&&(S=M(h,"weights","absoluteDifference")),Zt(f.shape,g.shape,"Error in absoluteDifference: ");const L=Pn(Be(f,g));return An(L,S,d)}const KO=G({absoluteDifference_:dZ});function pZ(r,l,h,d,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","cosineDistance"),S=M(l,"predictions","cosineDistance");let L=null;d!=null&&(L=M(d,"weights","cosineDistance")),Zt(g.shape,S.shape,"Error in cosineDistance: ");const x=ke(1),A=Be(x,Fe(ae(g,S),h,!0));return An(A,L,f)}const XO=G({cosineDistance_:pZ});function mZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){let f=M(r,"labels","hingeLoss");const g=M(l,"predictions","hingeLoss");let S=null;h!=null&&(S=M(h,"weights","hingeLoss")),Zt(f.shape,g.shape,"Error in hingeLoss: ");const L=ke(1);f=Be(ae(ke(2),f),L);const x=Wu(Be(L,ae(f,g)));return An(x,S,d)}const JO=G({hingeLoss_:mZ});function fZ(r,l,h,d=1,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","huberLoss"),S=M(l,"predictions","huberLoss");let L=null;h!=null&&(L=M(h,"weights","huberLoss")),Zt(g.shape,S.shape,"Error in huberLoss: ");const x=ke(d),A=Pn(Be(S,g)),O=PI(A,x),C=Be(A,O),$=St(ae(ke(.5),ut(O)),ae(x,C));return An($,L,f)}const ZO=G({huberLoss_:fZ});function 
gZ(r,l,h,d=1e-7,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","logLoss"),S=M(l,"predictions","logLoss");let L=null;h!=null&&(L=M(h,"weights","logLoss")),Zt(g.shape,S.shape,"Error in logLoss: ");const x=ke(1),A=ke(d),O=yt(ae(g,lo(St(S,A)))),C=ae(Be(x,g),lo(St(Be(x,S),A))),$=Be(O,C);return An($,L,f)}const QO=G({logLoss_:gZ});function yZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","meanSquaredError"),g=M(l,"predictions","meanSquaredError");let S=null;h!=null&&(S=M(h,"weights","meanSquaredError")),Zt(f.shape,g.shape,"Error in meanSquaredError: ");const L=KI(f,g);return An(L,S,d)}const e1=G({meanSquaredError_:yZ});function bZ(r,l){const h=M(r,"labels","sigmoidCrossEntropyWithLogits"),d=M(l,"logits","sigmoidCrossEntropyWithLogits");Zt(h.shape,d.shape,"Error in sigmoidCrossEntropyWithLogits: ");const f=Wu(d),g=ae(d,h),S=$I(Gn(yt(Pn(d))));return St(Be(f,g),S)}function wZ(r,l,h,d=0,f=jt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"multiClassLabels","sigmoidCrossEntropy");const S=M(l,"logits","sigmoidCrossEntropy");let L=null;if(h!=null&&(L=M(h,"weights","sigmoidCrossEntropy")),Zt(g.shape,S.shape,"Error in sigmoidCrossEntropy: "),d>0){const A=ke(d),O=ke(1),C=ke(.5);g=St(ae(g,Be(O,A)),ae(C,A))}const x=bZ(g,S);return An(x,L,f)}const t1=G({sigmoidCrossEntropy_:wZ});function LZ(r,l,h=-1){if(h===-1&&(h=l.rank-1),h!==l.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${l.rank} and dim was ${h}`);const d=Kf((f,g,S)=>{const L=!0,x=UI(g,[h],L),A=Be(Le(g,"float32"),x);S([f,A]);const O=yt(ae(A,f)),C=Fe(O,[h]),$=(z,ne)=>{const[te,se]=ne,fe=ts(z.shape,[h]);return[ae(re(z,fe),Be(Le(te,"float32"),Gn(se))),ae(re(z,fe),Be(Gn(se),Le(te,"float32")))]};return{value:C,gradFunc:$}});return d(r,l)}function SZ(r,l,h,d=0,f=jt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"onehotLabels","softmaxCrossEntropy");const S=M(l,"logits","softmaxCrossEntropy");let L=null;if(h!=null&&(L=M(h,"weights","softmaxCrossEntropy")),Zt(g.shape,S.shape,"Error in softmaxCrossEntropy: "),d>0){const A=ke(d),O=ke(1),C=ke(g.shape[1]);g=St(ae(g,Be(O,A)),ze(A,C))}const x=LZ(g,S);return An(x,L,f)}const n1=G({softmaxCrossEntropy_:SZ});const yAe={fft:$u,ifft:Gc,rfft:Uu,irfft:jI},IAe={hammingWindow:NO,hannWindow:tg,frame:ng,stft:CO},s1={flipLeftRight:OO,resizeNearestNeighbor:VO,resizeBilinear:GO,rotateWithOffset:EO,cropAndResize:RO,nonMaxSuppression:DO,nonMaxSuppressionAsync:UO,nonMaxSuppressionWithScore:BO,nonMaxSuppressionWithScoreAsync:MO,nonMaxSuppressionPadded:PO,nonMaxSuppressionPaddedAsync:zO},$Ae={bandPart:HO,gramSchmidt:YO,qr:jO},qAe={absoluteDifference:KO,computeWeightedLoss:An,cosineDistance:XO,hingeLoss:JO,huberLoss:ZO,logLoss:QO,meanSquaredError:e1,sigmoidCrossEntropy:t1,softmaxCrossEntropy:n1};const i1=1.7580993408473768,r1=1.0507009873554805;const o1={kernelName:Bm,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,ua(Le(h,"float32"),-1))}}};const a1={kernelName:f2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=ut(Le(h,"float32")),f=gs(Be(ke(1),d));return yt(ze(r,f))}}}};const c1={kernelName:g2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=gs(Be(ut(Le(h,"float32")),1));return ze(r,d)}}}};const l1={kernelName:Ec,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{let L=r;const x=vt(h.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,h.shape)},S=()=>{let L=r;const x=vt(d.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,d.shape)};return{a:g,b:S}}};const 
h1={kernelName:y2,saveAllInputs:!0,gradFunc:(r,l)=>{const h={};return l.forEach((d,f)=>{h[f]=()=>r.clone()}),h}};const u1={kernelName:b2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>je(h)}}};const d1={kernelName:w2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>je(h)}}};const p1={kernelName:L2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,gs(Be(ke(1),ut(Le(h,"float32")))))}}};const m1={kernelName:S2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=gs(St(ke(1),ut(Le(h,"float32"))));return ze(r,d)}}}};const f1={kernelName:T2,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=St(ut(h),ut(d));let x=ae(r,ze(d,L));const A=vt(h.shape,f);return A.length>0&&(x=Fe(x,A)),re(x,h.shape)},S=()=>{const L=St(ut(h),ut(d));let x=yt(ae(r,ze(h,L)));const A=vt(d.shape,f);return A.length>0&&(x=Fe(x,A)),re(x,d.shape)};return{a:g,b:S}}};const g1={kernelName:I2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,St(ut(Le(h,"float32")),1))}}};const y1={kernelName:x2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,Be(ke(1),ut(Le(h,"float32"))))}}};function IZ(r,l,h,d,f=[1,1,1],g,S){const L=M(r,"dy","avgPool3dBackprop"),x=M(l,"input","avgPool3dBackprop");let A=L,O=x,C=!1;x.rank===4&&(C=!0,A=re(L,[1,L.shape[0],L.shape[1],L.shape[2],L.shape[3]]),O=re(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]])),Z(A.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${A.rank}.`),Z(O.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${O.rank}.`),Z(co(d,f),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${d} and dilations '${f}'`),S!=null&&Z(Qt(g),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const $=se=>{const fe=qf(O.shape,h,d,f,g,S);return se.avgPool3dBackprop(A,O,fe)},z={dy:A,input:O},ne={filterSize:h,strides:d,dilations:f,pad:g,dimRoundingMode:S},te=H.runKernelFunc($,z,null,C2,ne);return C?re(te,[te.shape[1],te.shape[2],te.shape[3],te.shape[4]]):te}const b1=G({avgPool3dBackprop_:IZ});const w1={kernelName:N2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{filterSize:f,strides:g,dilations:S,pad:L,dimRoundingMode:x}=h,A=S==null?[1,1,1]:S;return{x:()=>b1(r,d,f,g,A,L,x)}}};function xZ(r,l,h,d,f){const g=M(r,"dy","avgPoolBackprop"),S=M(l,"input","avgPoolBackprop");Z(S.rank===g.rank,()=>`Rank of input (${S.rank}) does not match rank of dy (${g.rank})`);let L=S,x=g,A=!1;S.rank===3&&(A=!0,L=re(S,[1,S.shape[0],S.shape[1],S.shape[2]]),x=re(g,[1,g.shape[0],g.shape[1],g.shape[2]])),Z(x.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${x.rank}.`),Z(L.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${L.rank}.`);const O=ne=>{const te=Yf(L.shape,h,d,1,f);return ne.avgPoolBackprop(x,L,te)},C={dy:x,input:L},$={filterSize:h,strides:d,pad:f},z=H.runKernelFunc(O,C,null,v2,$);return A?re(z,[z.shape[1],z.shape[2],z.shape[3]]):z}const L1=G({avgPoolBackprop_:xZ});const S1={kernelName:A2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{filterSize:f,strides:g,pad:S}=h;return{x:()=>L1(r,d,f,g,S)}}};const I1={kernelName:Mm,inputsToSave:["a","b"],gradFunc:(r,l,h)=>{const[d,f]=l,{transposeA:g,transposeB:S}=h;return!g&&!S?{a:()=>dn(r,f,!1,!0),b:()=>dn(d,r,!0,!1)}:!g&&S?{a:()=>dn(r,f,!1,!1),b:()=>dn(r,d,!0,!1)}:g&&!S?{a:()=>dn(f,r,!1,!0),b:()=>dn(d,r,!1,!1)}:{a:()=>dn(f,r,!0,!0),b:()=>dn(r,d,!0,!0)}}};const 
x1={kernelName:Pm,gradFunc:(r,l,h)=>{const{blockShape:d,crops:f}=h;return{x:()=>VI(r,d,f)}}};const T1={kernelName:zm,gradFunc:(r,l,h)=>{const d=h,f=d.inputShape,g=d.shape,S=Array.from(g);for(let x=f.length-1;x>=0;x--)if(f[x]===g[x])S[x]=1;else if(f[x]!==1)throw new Error(`broadcastTo(): [${f}] cannot be broadcast to [${g}].`);const L=[];for(let x=0;x1&&L.push(x);return{x:()=>Fe(r,L,!0)}}};const A1={kernelName:Dc,gradFunc:r=>({x:()=>r.clone()})};const v1={kernelName:R2,gradFunc:r=>({x:()=>je(r)})};const N1={kernelName:O2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{clipValueMin:f,clipValueMax:g}=h;return{x:()=>zn(la(Sr(d,f),Ir(d,g)),r,je(r))}}};const C1={kernelName:Gm,saveAllInputs:!0,gradFunc:(r,l,h)=>{const d=l.map(x=>x.shape),{axis:f}=h,g=ht(f,l[0].shape)[0],S=d.map(x=>x[g]),L=uo(r,S,g);return L.map(x=>()=>x)}};const R1={kernelName:Vm,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,{dilations:g,strides:S,pad:L,dataFormat:x}=h;return Z(ao(g),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${g}'`),{x:()=>wO(d.shape,r,f,S,L,x),filter:()=>eg(d,r,f.shape,S,L,x)}}};const O1={kernelName:Hm,inputsToSave:["dy","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,{strides:g,pad:S,dataFormat:L,dimRoundingMode:x}=h;return{dy:()=>NI(r,f,g,S,L,1,x),filter:()=>eg(r,d,f.shape,g,S,L,x)}}};function TZ(r,l,h,d,f){let g=r;r.rank===4&&(g=re(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]]));let S=l;S.rank===4&&(S=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),Z(g.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${g.shape}.`),Z(S.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${S.shape}.`),Z(h.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${h}.`),Z(g.shape[4]===h[3],()=>`Error in conv3dDerFilter: depth of input ${g.shape[4]}) must match input depth in filter (${h[3]}.`),Z(S.shape[4]===h[4],()=>`Error in conv3dDerFilter: depth of dy (${S.shape[4]}) must match output depth for filter (${h[4]}).`);const L=O=>{const C=1,$=ku(g.shape,h,d,C,f);return O.conv3dDerFilter(g,S,$)},x={x:g,y:S},A={strides:d,pad:f};return H.runKernelFunc(L,x,null,F2,A)}const E1=G({conv3DBackpropFilter_:TZ});const D1={kernelName:k2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const{dilations:d,strides:f,pad:g}=h;Z(ao(d),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${d}'`);const[S,L]=l;return{x:()=>LO(S.shape,r,L,f,g),filter:()=>E1(S,r,L.shape,f,g)}}};const k1={kernelName:Ym,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(yt(YI(Le(h,"float32"))),r)}}};const F1={kernelName:qm,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(qI(Le(h,"float32")),r)}}};const _1={kernelName:jm,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{axis:f,exclusive:g,reverse:S}=h;return{x:()=>{const L=fs([f],d.rank);let x=RI(r,f,g,!S);return L!=null&&(x=Wt(x,L)),x}}}};const W1={kernelName:$2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const{dilations:d,strides:f,pad:g,dimRoundingMode:S}=h,L=d==null?[1,1]:d;Z(ao(L),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. 
Got dilations '${L}'`);const[x,A]=l;Z(x.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${x.rank}.`),Z(A.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${A.rank}.`),Z(x.shape[3]===A.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${x.shape[3]}) must match the inChannels dimension in filter ${A.shape[2]}.`),Z(co(f,L),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. Got strides ${f} and dilations '${L}'.`),S!=null&&Z(Qt(g),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const O=Lr(x.shape,A.shape,f,L,g,S,!0);return{x:()=>vO(x.shape,r,A,O),filter:()=>AO(x,r,A.shape,O)}}};const $1={kernelName:M2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,g={x:d,filter:f,dy:r},S={x:d,filter:f,dy:r};return{x:()=>H.runKernel(P2,g,h),filter:()=>H.runKernel(z2,S,h)}}};const U1={kernelName:Km,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ze(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{let L=ae(r,Le(h,"float32"));const x=vt(d.shape,f);x.length>0&&(L=re(Fe(L,x),d.shape));const A=ut(d);return yt(ze(L,Le(A,"float32")))};return{a:g,b:S}}};const B1={kernelName:G2,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l,d=g=>g.eluDer(r,h),f={dy:r,y:h};return{x:()=>H.runKernelFunc(d,f,null,V2)}}};const M1={kernelName:H2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l,d=ae(Gn(yt(ut(h))),2/Math.sqrt(Math.PI));return{x:()=>ae(r,d)}}};const P1={kernelName:Xm,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,h)}}};const z1={kernelName:q2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,Gn(h))}}};const G1={kernelName:Jm,gradFunc:r=>({x:()=>je(r)})};const V1={kernelName:Zm,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ze(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{let L=ae(r,Le(h,"float32"));const x=vt(d.shape,f);x.length>0&&(L=re(Fe(L,x),d.shape));const A=ut(d);return yt(ze(L,Le(A,"float32")))};return{a:g,b:S}}};const H1={kernelName:J2,inputsToSave:["x","mean","variance","scale"],gradFunc:(r,l,h)=>{const{varianceEpsilon:d}=h,[f,g,S,L]=l,x=L==null?ke(1):L,A=vt(g.shape,f.shape),O=[];if(g.rank===1){for(let xe=0;xeg.rank===1?re(ae(ae(r,aa(re(z,[1,1,1,g.shape[0]]),O)),x),f.shape):re(ae(ae(r,z),x),f.shape),se=()=>{let xe=ae(ae(z,ke(-1)),$);return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)},fe=()=>{let xe=ae(ae(ne,C),$);return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)},de=()=>{const xe=ae(C,z);let Me=ae(r,xe);return g.rank===1&&(Me=Fe(Me,A)),re(Me,g.shape)},Ae=()=>{let xe=r;return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)};return{x:te,mean:se,variance:fe,scale:de,offset:Ae}}};const j1={kernelName:Qm,inputsToSave:["x","indices"],gradFunc:(r,l,h)=>{const[d,f]=l,{axis:g}=h,S=ht(g,d.shape)[0],L=()=>{const x=d.shape,A=f.size,O=x.slice(0,S),C=O.length,$=x.slice(g,x.length).slice(1),z=$.length,ne=Y1(0,C),te=Y1(C+1,C+1+z),se=q1([O,[A],$]),fe=re(r,se),de=re(f,[A]),Ae=q1([[C],ne,te]),xe=Wt(fe,Ae);let Me=JI(xe,de,d.shape[S]);const Ke=Uc(Ae);return Me=Wt(Me,Ke),Me};return{x:L,indices:()=>f}}};function Y1(r,l){const h=[];for(let d=r;d{const[h,d]=l;return{a:()=>je(h),b:()=>je(d)}}};const X1={kernelName:tf,gradFunc:r=>({x:()=>Le(r,"float32")})};const J1={kernelName:tR,gradFunc:r=>({x:()=>je(r)})};const 
Z1={kernelName:nR,gradFunc:r=>({x:()=>je(r)})};const Q1={kernelName:sR,gradFunc:r=>({x:()=>je(r)})};const eE={kernelName:sf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,St(h,1))}}};const tE={kernelName:nf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,Le(h,"float32"))}}};const nE={kernelName:cR,inputsToSave:[],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d]=l,{axis:f}=h;return{logits:()=>{const g=!0,S=Gn(d);return Be(r,ae(Fe(r,f,g),S))}}}};function AZ(r,l,h,d=5,f=1,g=1,S=.5){const L=O=>O.LRNGrad(h,r,l,d,f,g,S),x={x:r,y:l,dy:h},A={depthRadius:d,bias:f,alpha:g,beta:S};return H.runKernelFunc(L,x,null,hR,A)}const sE=G({localResponseNormalizationBackprop_:AZ});const iE={kernelName:lR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{depthRadius:g,bias:S,alpha:L,beta:x}=h;return{x:()=>sE(d,f,r,g,S,L,x)}}};function sg(r,l,h,d,f){return l.rank{const g=ae(r,Le(OI(h,l),r.dtype));return f==null?g:Wt(g,f)}}}const QI={kernelName:rf,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const d=h,{reductionIndices:f}=d,[g,S]=l,L=ht(f,g.shape),x=fs(L,g.rank),A=sg(r,S,g,L,x);return{x:()=>{let O=A.x();return x!=null&&(O=Wt(O)),O}}}};const rE={kernelName:of,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=()=>ae(r,Le(Sr(h,d),"float32")),g=()=>ae(r,Le(WI(h,d),"float32"));return{a:f,b:g}}};function vZ(r,l,h,d,f,g=[1,1,1],S,L){const x=M(r,"dy","maxPool3dBackprop"),A=M(l,"input","maxPool3dBackprop"),O=M(h,"output","maxPool3dBackprop");let C=x,$=A,z=O,ne=!1;A.rank===4&&(ne=!0,C=re(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]]),$=re(A,[1,A.shape[0],A.shape[1],A.shape[2],A.shape[3]]),z=re(O,[1,O.shape[0],O.shape[1],O.shape[2],O.shape[3]])),Z(C.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),Z($.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${$.rank}.`),Z(z.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${z.rank}.`),Z(co(f,g),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. 
Got strides ${f} and dilations '${g}'`),L!=null&&Z(Qt(S),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${L} but got pad ${S}.`);const te=Ae=>{const xe=qf($.shape,d,f,g,S,L);return Ae.maxPool3dBackprop(C,$,z,xe)},se={dy:C,input:$,output:z},fe={filterSize:d,strides:f,dilations:g,pad:S,dimRoundingMode:L},de=H.runKernelFunc(te,se,null,mR,fe);return ne?re(de,[de.shape[1],de.shape[2],de.shape[3],de.shape[4]]):de}const oE=G({maxPool3dBackprop_:vZ});const aE={kernelName:pR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{filterSize:g,strides:S,dilations:L,pad:x,dimRoundingMode:A}=h,O=L==null?[1,1,1]:L;return{x:()=>oE(r,d,f,g,S,O,x,A)}}};function NZ(r,l,h,d,f,g,S){const L=M(r,"dy","maxPoolBackprop"),x=M(l,"input","maxPoolBackprop"),A=M(h,"output","maxPoolBackprop");Z(x.rank===L.rank,()=>`Rank of input (${x.rank}) does not match rank of dy (${L.rank})`),Z(L.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${L.rank}.`),Z(x.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${x.rank}.`),S!=null&&Z(Qt(g),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const O=z=>{const ne=Yf(x.shape,d,f,1,g,S);return z.maxPoolBackprop(L,x,A,ne)},C={dy:L,input:x,output:A},$={filterSize:d,strides:f,pad:g,dimRoundingMode:S};return H.runKernelFunc(O,C,null,dR,$)}const cE=G({maxPoolBackprop_:NZ});const lE={kernelName:uR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{filterSize:g,strides:S,pad:L}=h;return{x:()=>cE(r,d,f,g,S,L)}}};const hE={kernelName:af,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const d=h,{axis:f}=d,[g,S]=l,L=ht(f,g.shape),x=fs(L,g.rank),A=sg(r,S,g,L,x);return{x:()=>{let O=A.x();return x!=null&&(O=Wt(O)),O}}}};const uE={kernelName:cf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=()=>ae(r,Le(Ir(h,d),"float32")),g=()=>ae(r,Le(wi(h,d),"float32"));return{a:f,b:g}}};const dE={kernelName:fR,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=vt(h.shape,f);return L.length>0?re(Fe(r,L),h.shape):r},S=()=>{const L=ae(r,yt(kI(ze(h,d)))),x=vt(d.shape,f);return x.length>0?re(Fe(L,x),d.shape):L};return{a:g,b:S}}};const pE={kernelName:lf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ae(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{const L=ae(r,Le(h,"float32")),x=vt(d.shape,f);return x.length>0?re(Fe(L,x),d.shape):L};return{a:g,b:S}}};const mE={kernelName:hf,gradFunc:r=>({x:()=>yt(r)})};const fE={kernelName:SR,inputsToSave:["indices"],gradFunc:(r,l)=>{const h=l[0];return{indices:()=>Fs(h.shape,"float32")}}};const gE={kernelName:LR,gradFunc:r=>({x:()=>je(r)})};const ex={kernelName:uf,inputsToSave:["x"],gradFunc:(r,l,h)=>{const d=l[0],{paddings:f}=h,g=f.map(S=>S[0]);return{x:()=>At(r,g,d.shape)}}};const yE={kernelName:df,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(r,l)=>{const[h,d,f]=l,g=h,S=d,L=rt(g.shape,S.shape),x=()=>{const O=Le(S,"float32");let C=ae(r,ae(O,ha(g,Be(O,ke(1)))));const $=vt(g.shape,L);return $.length>0&&(C=Fe(C,$)),re(C,g.shape)},A=()=>{const O=wi(g,0),C=zn(O,lo(g),je(g));let $=ae(r,ae(f,C));const z=vt(S.shape,L);return z.length>0&&($=Fe($,z)),re($,S.shape)};return{a:x,b:A}}};const bE={kernelName:IR,inputsToSave:["x","alpha"],gradFunc:(r,l)=>{const[h,d]=l,f=wi(h,0);return{x:()=>zn(f,r,ae(r,d)),alpha:()=>{let g=zn(f,je(r),ae(r,h));const S=vt(d.shape,r.shape);return 
S.length>0&&(g=Fe(g,S)),re(g,d.shape)}}}};const wE={kernelName:AR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,yt(ut(h)))}}};const LE={kernelName:CR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l,d=ae(Ir(h,6),ua(h));return{x:()=>ae(r,Le(d,"float32"))}}};const SE={kernelName:pf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,Le(ua(h),"float32"))}}};const IE={kernelName:mf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>re(r,h.shape)}}};const xE={kernelName:gf,inputsToSave:["images"],gradFunc:(r,l,h)=>{const[d]=l,f=L=>{const{alignCorners:x}=h;return L.resizeBilinearBackprop(r,d,x)},g={images:d},S=()=>H.runKernelFunc(f,g,null,NR,h);return{images:S}}};const TE={kernelName:ff,inputsToSave:["images"],gradFunc:(r,l,h)=>{const[d]=l,f=L=>{const{alignCorners:x}=h;return L.resizeNearestNeighborBackprop(r,d,x)},g={images:d},S=()=>H.runKernelFunc(f,g,null,vR,h);return{images:S}}};const AE={kernelName:yf,gradFunc:(r,l,h)=>{const{dims:d}=h,f=ht(d,r.shape);return{x:()=>zc(r,f)}}};const vE={kernelName:RR,gradFunc:r=>({x:()=>je(r)})};const NE={kernelName:bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>yt(ze(r,ae(ha(h,1.5),2)))}}};const CE={kernelName:wf,inputsToSave:["condition"],gradFunc:(r,l)=>{const[h]=l;return{condition:()=>Le(je(h),"float32"),t:()=>ae(r,Le(h,r.dtype)),e:()=>ae(r,Le(BI(h),r.dtype))}}};const RE={kernelName:OR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=wi(h,ke(0)),f=ke(i1),g=ke(r1),S=ae(r,g),L=ae(ae(r,f),Gn(Le(h,"float32")));return zn(d,S,L)}}}};const OE={kernelName:xf,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,ae(h,Be(ke(1),h)))}}};const EE={kernelName:ER,gradFunc:r=>({x:()=>je(r)})};const DE={kernelName:Sf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(Fu(Le(h,"float32")),r)}}};const kE={kernelName:If,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(CI(Le(h,"float32")),r)}}};const FE={kernelName:Lf,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{begin:f,size:g}=h,S=d.shape,[L,x]=Vf(d,f,g),A=[];for(let O=0;OGI(r,A)}}};const _E={kernelName:kR,outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d]=l,{dim:f}=h,g=!0,S=ae(r,d);return{logits:()=>Be(S,ae(Fe(S,[f],g),d))}}};const WE={kernelName:DR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,AI(h))}}};const tx={kernelName:vf,gradFunc:(r,l,h)=>{const{blockShape:d,paddings:f}=h;return{x:()=>vI(r,d,f)}}};const nx={kernelName:Nf,gradFunc:(r,l,h)=>{const{axis:d}=h;return{x:()=>bn(r,d)}}};const $E={kernelName:Tf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,ae(gs(Le(h,"float32")),2))}}};const UE={kernelName:FR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,ae(Le(h,"float32"),2))}}};const BE={kernelName:Cf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=ke(2),g=()=>ae(r,ae(f,Be(h,d))),S=()=>ae(r,ae(f,Be(d,h)));return{a:g,b:S}}};const ME={kernelName:_f,gradFunc:r=>({x:()=>je(r)})};const PE={kernelName:Rf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{let L=r;const x=vt(h.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,h.shape)},S=()=>{let L=r;const x=vt(d.shape,f);return x.length>0&&(L=Fe(L,x)),re(yt(L),d.shape)};return{a:g,b:S}}};const zE={kernelName:Af,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,f=d.shape.slice(),{axis:g}=h,S=ht(g,d.shape);S.forEach(A=>{f[A]=1});const L=re(r,f),x=ae(L,Ki(d.shape,"float32"));return{x:()=>x}}};const 
GE={kernelName:_R,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,ut(Fu(h)))}}};const VE={kernelName:WR,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(Be(ke(1),ut(h)),r)}}};const HE={kernelName:Of,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{reps:f}=h,g=()=>{let S=je(d);if(d.rank===1)for(let L=0;L{const d=h,{perm:f}=d,g=Uc(f);return{x:()=>Wt(r,g)}}};const qE={kernelName:Df,gradFunc:(r,l,h)=>{const d=h,{axis:f}=d;return{value:()=>js(r,f)}}};const jE={kernelName:kf,inputsToSave:["segmentIds"],gradFunc:(r,l)=>{const[h]=l,d=()=>CZ(r,h);return{x:d}}};function CZ(r,l){const h=_I(l,je(l)),d=FI(r,h);let f=Sr(l,ke(0,"int32"));const g=d.rank-f.rank;for(let L=0;L({x:()=>je(r)})};const RZ=[o1,a1,c1,l1,h1,u1,d1,p1,m1,f1,g1,y1,w1,S1,I1,x1,T1,A1,v1,N1,C1,O1,R1,D1,k1,F1,_1,W1,$1,U1,B1,M1,P1,z1,V1,G1,H1,j1,K1,X1,J1,Z1,Q1,eE,tE,nE,iE,QI,QI,rE,aE,lE,hE,uE,dE,pE,mE,fE,gE,ex,ex,yE,bE,wE,LE,SE,IE,xE,TE,AE,vE,NE,CE,RE,OE,EE,DE,kE,FE,_E,WE,tx,tx,nx,nx,$E,BE,UE,ME,PE,zE,GE,VE,HE,YE,qE,jE,KE];for(const r of RZ)BR(r);function sx(r,l,h=!1){const{Image:d,Canvas:f}=gt.getEnv();if(!(r instanceof d||r instanceof f))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");const g=sa(r),S=l/Math.max(g.height,g.width),L=S*g.width,x=S*g.height,A=Oc({width:l,height:l}),O=r instanceof f?r:Tu(r),C=Math.abs(L-x)/2,$=h&&L{if(yr(h)){this._imageTensors[d]=h,this._inputDimensions[d]=h.shape;return}if(Os(h)){const g=h.shape[0];if(g!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${g} passed, but not supported in input array`);this._imageTensors[d]=h,this._inputDimensions[d]=h.shape.slice(1);return}const f=h instanceof gt.getEnv().Canvas?h:Tu(h);this._canvases[d]=f,this._inputDimensions[d]=[f.height,f.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return zi(this.batchSize,0,1).map((r,l)=>this.getReshapedInputDimensions(l))}getInput(r){return this.canvases[r]||this.imageTensors[r]}getInputDimensions(r){return this._inputDimensions[r]}getInputHeight(r){return this._inputDimensions[r][0]}getInputWidth(r){return this._inputDimensions[r][1]}getReshapedInputDimensions(r){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");const l=this.getInputWidth(r),h=this.getInputHeight(r);return PS({width:l,height:h},this.inputSize)}toBatchTensor(r,l=!0){return this._inputSize=r,pO(()=>{const h=zi(this.batchSize,0,1).map(f=>{const g=this.getInput(f);if(g instanceof Tn){let S=Os(g)?g:g.expandDims();return S=HS(S,l),(S.shape[1]!==r||S.shape[2]!==r)&&(S=s1.resizeBilinear(S,[r,r])),S.as3D(r,r,3)}if(g instanceof gt.getEnv().Canvas)return II.fromPixels(sx(g,r,l));throw new Error(`toBatchTensor - at batchIdx ${f}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${g}`)}),d=js(h.map(f=>Le(f,"float32"))).as4D(this.batchSize,r,r,3);return d})}}async function Rt(r){if(r instanceof po)return r;let l=Array.isArray(r)?r:[r];if(!l.length)throw new Error("toNetInput - empty array passed as input");const h=f=>Array.isArray(r)?` at input index ${f}:`:"",d=l.map(na);return d.forEach((f,g)=>{if(!Um(f)&&!yr(f)&&!Os(f))throw typeof l[g]=="string"?new Error(`toNetInput -${h(g)} string passed, but 
could not resolve HTMLElement for element id ${l[g]}`):new Error(`toNetInput -${h(g)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(Os(f)){const S=f.shape[0];if(S!==1)throw new Error(`toNetInput -${h(g)} tf.Tensor4D with batchSize ${S} passed, but not supported in input array`)}}),await Promise.all(d.map(f=>Um(f)&&QS(f))),new po(d,Array.isArray(r))}async function Vc(r,l){const{Canvas:h}=gt.getEnv();let d=r;if(!(r instanceof h)){const S=await Rt(r);if(S.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");const L=S.getInput(0);d=L instanceof h?L:await tI(L)}const f=es(d),g=l.map(S=>S instanceof Yt?S.forSize(d.width,d.height).box.floor():S).map(S=>S.clipAtImageBorders(d.width,d.height));return g.map(({x:S,y:L,width:x,height:A})=>{const O=Oc({width:x,height:A});return es(O).putImageData(f.getImageData(S,L,x,A),0,0),O})}const ig=Ye(Je());async function Hc(r,l){if(!yr(r)&&!Os(r))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(Os(r)&&r.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return ig.tidy(()=>{const[h,d,f]=r.shape.slice(Os(r)?1:0),g=l.map(L=>L instanceof Yt?L.forSize(d,h).box:L).map(L=>L.clipAtImageBorders(d,h)),S=g.map(({x:L,y:x,width:A,height:O})=>ig.slice3d(r.as3D(h,d,f),[x,L,0],[O,A,f]));return S})}async function pa(r,l){const h=gt.getEnv().fetch,d=await h(r,l);if(!(d.status<400))throw new Error(`failed to fetch: (${d.status}) ${d.statusText}, from url: ${d.url}`);return d}async function OZ(r){const l=await pa(r),h=await l.blob();if(!h.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${h.type}, for url: ${l.url}`);return eI(h)}async function ix(r){return(await pa(r)).json()}async function EZ(r){return new Float32Array(await(await pa(r)).arrayBuffer())}function rg(r,l){const h=`${l}-weights_manifest.json`;if(!r)return{modelBaseUri:"",manifestUri:h};if(r==="/")return{modelBaseUri:"/",manifestUri:`/${h}`};const d=r.startsWith("http://")?"http://":r.startsWith("https://")?"https://":"";r=r.replace(d,"");const f=r.split("/").filter(L=>L),g=r.endsWith(".json")?f[f.length-1]:h;let S=d+(r.endsWith(".json")?f.slice(0,f.length-1):f).join("/");return S=r.startsWith("/")?`/${S}`:S,{modelBaseUri:S,manifestUri:S==="/"?`/${g}`:`${S}/${g}`}}const XE=Ye(Je());async function rx(r,l){const{manifestUri:h,modelBaseUri:d}=rg(r,l);let f=await ix(h);return XE.io.loadWeights(f,d)}function DZ(r,l,h=!1){const{width:d,height:f}=h?sa(l):l;return r.width=d,r.height=f,{width:d,height:f}}const xr=Ye(Je());class kn{constructor(r){this._name=r;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(r){const{obj:l,objProp:h}=this.traversePropertyPath(r);return l[h]}reassignParamFromPath(r,l){const{obj:h,objProp:d}=this.traversePropertyPath(r);h[d].dispose(),h[d]=l}getParamList(){return this._paramMappings.map(({paramPath:r})=>({path:r,tensor:this.getParamFromPath(r)}))}getTrainableParams(){return this.getParamList().filter(r=>r.tensor instanceof xr.Variable)}getFrozenParams(){return this.getParamList().filter(r=>!(r.tensor instanceof xr.Variable))}variable(){this.getFrozenParams().forEach(({path:r,tensor:l})=>{this.reassignParamFromPath(r,l.variable())})}freeze(){this.getTrainableParams().forEach(({path:r,tensor:l})=>{const 
h=xr.tensor(l.dataSync());l.dispose(),this.reassignParamFromPath(r,h)})}dispose(r=!0){this.getParamList().forEach(l=>{if(r&&l.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${l.path}`);l.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:r})=>Array.from(r.dataSync())).reduce((r,l)=>r.concat(l)))}async load(r){if(r instanceof Float32Array){this.extractWeights(r);return}await this.loadFromUri(r)}async loadFromUri(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);const l=await rx(r,this.getDefaultModelName());this.loadFromWeightMap(l)}async loadFromDisk(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);const{readFile:l}=gt.getEnv(),{manifestUri:h,modelBaseUri:d}=rg(r,this.getDefaultModelName()),f=x=>Promise.all(x.map(A=>l(A).then(O=>O.buffer))),g=xr.io.weightsLoaderFactory(f),S=JSON.parse((await l(h)).toString()),L=await g(S,d);this.loadFromWeightMap(L)}loadFromWeightMap(r){const{paramMappings:l,params:h}=this.extractParamsFromWeigthMap(r);this._paramMappings=l,this._params=h}extractWeights(r){const{paramMappings:l,params:h}=this.extractParams(r);this._paramMappings=l,this._params=h}traversePropertyPath(r){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");const l=r.split("/").reduce((f,g)=>{if(!f.nextObj.hasOwnProperty(g))throw new Error(`traversePropertyPath - object does not have property ${g}, for path ${r}`);return{obj:f.nextObj,objProp:g,nextObj:f.nextObj[g]}},{nextObj:this.params}),{obj:h,objProp:d}=l;if(!h||!d||!(h[d]instanceof xr.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${r}`);return{obj:h,objProp:d}}}const Yc=Ye(Je());function ns(r,l,h){return Yc.tidy(()=>{let d=Yc.separableConv2d(r,l.depthwise_filter,l.pointwise_filter,h,"same");return d=Yc.add(d,l.bias),d})}const Dt=Ye(Je());function og(r,l,h=!1){return Dt.tidy(()=>{const d=Dt.relu(h?Dt.add(Dt.conv2d(r,l.conv0.filters,[2,2],"same"),l.conv0.bias):ns(r,l.conv0,[2,2])),f=ns(d,l.conv1,[1,1]),g=Dt.relu(Dt.add(d,f)),S=ns(g,l.conv2,[1,1]);return Dt.relu(Dt.add(d,Dt.add(f,S)))})}function Mu(r,l,h=!1,d=!0){return Dt.tidy(()=>{const f=Dt.relu(h?Dt.add(Dt.conv2d(r,l.conv0.filters,d?[2,2]:[1,1],"same"),l.conv0.bias):ns(r,l.conv0,d?[2,2]:[1,1])),g=ns(f,l.conv1,[1,1]),S=Dt.relu(Dt.add(f,g)),L=ns(S,l.conv2,[1,1]),x=Dt.relu(Dt.add(f,Dt.add(g,L))),A=ns(x,l.conv3,[1,1]);return Dt.relu(Dt.add(f,Dt.add(g,Dt.add(L,A))))})}const mo=Ye(Je());function ma(r,l,h="same",d=!1){return mo.tidy(()=>{const f=mo.add(mo.conv2d(r,l.filters,[1,1],h),l.bias);return d?mo.relu(f):f})}function Vn(r,l){Object.keys(r).forEach(h=>{l.some(d=>d.originalPath===h)||r[h].dispose()})}const ag=Ye(Je());function qc(r,l){return function(h,d,f,g){const S=ag.tensor4d(r(h*d*f*f),[f,f,h,d]),L=ag.tensor1d(r(d));return l.push({paramPath:`${g}/filters`},{paramPath:`${g}/bias`}),{filters:S,bias:L}}}const cg=Ye(Je());function lg(r,l){return function(h,d,f){const g=cg.tensor2d(r(h*d),[h,d]),S=cg.tensor1d(r(d));return l.push({paramPath:`${f}/weights`},{paramPath:`${f}/bias`}),{weights:g,bias:S}}}class ox{constructor(r,l,h){this.depthwise_filter=r;this.pointwise_filter=l;this.bias=h}}const Pu=Ye(Je());function jc(r,l){return function(h,d,f){const g=Pu.tensor4d(r(3*3*h),[3,3,h,1]),S=Pu.tensor4d(r(h*d),[1,1,h,d]),L=Pu.tensor1d(r(d));return 
l.push({paramPath:`${f}/depthwise_filter`},{paramPath:`${f}/pointwise_filter`},{paramPath:`${f}/bias`}),new ox(g,S,L)}}function Kc(r){return function(l){const h=r(`${l}/depthwise_filter`,4),d=r(`${l}/pointwise_filter`,4),f=r(`${l}/bias`,1);return new ox(h,d,f)}}function ys(r,l){return function(h,d,f){const g=r[h];if(!Jo(g,d))throw new Error(`expected weightMap[${h}] to be a Tensor${d}D, instead have ${g}`);return l.push({originalPath:h,paramPath:f||h}),g}}function Hn(r){let l=r;function h(f){const g=l.slice(0,f);return l=l.slice(f),g}function d(){return l}return{extractWeights:h,getRemainingWeights:d}}function hg(r,l){const h=qc(r,l),d=jc(r,l);function f(S,L,x,A=!1){const O=A?h(S,L,3,`${x}/conv0`):d(S,L,`${x}/conv0`),C=d(L,L,`${x}/conv1`),$=d(L,L,`${x}/conv2`);return{conv0:O,conv1:C,conv2:$}}function g(S,L,x,A=!1){const{conv0:O,conv1:C,conv2:$}=f(S,L,x,A),z=d(L,L,`${x}/conv3`);return{conv0:O,conv1:C,conv2:$,conv3:z}}return{extractDenseBlock3Params:f,extractDenseBlock4Params:g}}function JE(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractDenseBlock4Params:f}=hg(h,l),g=f(3,32,"dense0",!0),S=f(32,64,"dense1"),L=f(64,128,"dense2"),x=f(128,256,"dense3");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{dense0:g,dense1:S,dense2:L,dense3:x}}}function ug(r){return function(l){const h=r(`${l}/filters`,4),d=r(`${l}/bias`,1);return{filters:h,bias:d}}}function dg(r,l){const h=ys(r,l),d=ug(h),f=Kc(h);function g(L,x=!1){const A=x?d(`${L}/conv0`):f(`${L}/conv0`),O=f(`${L}/conv1`),C=f(`${L}/conv2`);return{conv0:A,conv1:O,conv2:C}}function S(L,x=!1){const A=x?d(`${L}/conv0`):f(`${L}/conv0`),O=f(`${L}/conv1`),C=f(`${L}/conv2`),$=f(`${L}/conv3`);return{conv0:A,conv1:O,conv2:C,conv3:$}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:S}}function ZE(r){const l=[],{extractDenseBlock4Params:h}=dg(r,l),d={dense0:h("dense0",!0),dense1:h("dense1"),dense2:h("dense2"),dense3:h("dense3")};return Vn(r,l),{params:d,paramMappings:l}}const Xc=Ye(Je());class pg extends kn{constructor(){super("FaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceFeatureExtractor - load model before inference");return Xc.tidy(()=>{const h=r.toBatchTensor(112,!0),d=[122.782,117.001,104.298],f=yi(h,d).div(Xc.scalar(255));let g=Mu(f,l.dense0,!0);return g=Mu(g,l.dense1),g=Mu(g,l.dense2),g=Mu(g,l.dense3),g=Xc.avgPool(g,[7,7],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(r){return ZE(r)}extractParams(r){return JE(r)}}const Jc=Ye(Je());function zu(r,l){return Jc.tidy(()=>Jc.add(Jc.matMul(r,l.weights),l.bias))}function QE(r,l,h){const d=[],{extractWeights:f,getRemainingWeights:g}=Hn(r),S=lg(f,d),L=S(l,h,"fc");if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:d,params:{fc:L}}}function eD(r){const l=[],h=ys(r,l);function d(g){const S=h(`${g}/weights`,2),L=h(`${g}/bias`,1);return{weights:S,bias:L}}const f={fc:d("fc")};return Vn(r,l),{params:f,paramMappings:l}}function mg(r){const l={},h={};return Object.keys(r).forEach(d=>{const f=d.startsWith("fc")?h:l;f[d]=r[d]}),{featureExtractorMap:l,classifierMap:h}}const tD=Ye(Je());class fg extends kn{constructor(r,l){super(r);this._faceFeatureExtractor=l}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return 
tD.tidy(()=>{const h=r instanceof po?this.faceFeatureExtractor.forwardInput(r):r;return zu(h.as2D(h.shape[0],-1),l.fc)})}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:h}=this.extractClassifierParams(r);this._params=l,this._paramMappings=h}extractClassifierParams(r){return QE(r,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:h}=mg(r);return this.faceFeatureExtractor.loadFromWeightMap(l),eD(h)}extractParams(r){const l=this.getClassifierChannelsIn(),h=this.getClassifierChannelsOut(),d=h*l+h,f=r.slice(0,r.length-d),g=r.slice(r.length-d);return this.faceFeatureExtractor.extractWeights(f),this.extractClassifierParams(g)}}const ax=["neutral","happy","sad","angry","fearful","disgusted","surprised"];class fa{constructor(r){if(r.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${r.length}`);ax.forEach((l,h)=>{this[l]=r[h]})}asSortedArray(){return ax.map(r=>({expression:r,probability:this[r]})).sort((r,l)=>l.probability-r.probability)}}const Zc=Ye(Je());class cx extends fg{constructor(r=new pg){super("FaceExpressionNet",r)}forwardInput(r){return Zc.tidy(()=>Zc.softmax(this.runNet(r)))}async forward(r){return this.forwardInput(await Rt(r))}async predictExpressions(r){const l=await Rt(r),h=await this.forwardInput(l),d=await Promise.all(Zc.unstack(h).map(async g=>{const S=await g.data();return g.dispose(),S}));h.dispose();const f=d.map(g=>new fa(g));return l.isBatchInput?f:f[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}}function lx(r){return r.expressions instanceof fa}function gg(r,l){const h={expressions:l};return Object.assign({},r,h)}function kZ(r,l,h=.1,d){const f=Array.isArray(l)?l:[l];f.forEach(g=>{const S=g instanceof fa?g:lx(g)?g.expressions:void 0;if(!S)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");const L=S.asSortedArray(),x=L.filter(C=>C.probability>h),A=Vi(g)?g.detection.box.bottomLeft:d||new Ze(0,0),O=new Rc(x.map(C=>`${C.expression} (${Zo(C.probability)})`),A);O.draw(r)})}function ga(r){return Vi(r)&&r.landmarks instanceof qs&&r.unshiftedLandmarks instanceof qs&&r.alignedRect instanceof Yt}function Qc(r,l){const{box:h}=r.detection,d=l.shiftBy(h.x,h.y),f=d.align(),{imageDims:g}=r.detection,S=new Yt(r.detection.score,f.rescale(g.reverse()),g),L={landmarks:d,unshiftedLandmarks:l,alignedRect:S};return Object.assign({},r,L)}class nD{constructor(r={}){const{drawLines:l=!0,drawPoints:h=!0,lineWidth:d,lineColor:f,pointSize:g,pointColor:S}=r;this.drawLines=l,this.drawPoints=h,this.lineWidth=d||1,this.pointSize=g||2,this.lineColor=f||"rgba(0, 255, 255, 1)",this.pointColor=S||"rgba(255, 0, 255, 1)"}}class sD{constructor(r,l={}){this.faceLandmarks=r,this.options=new nD(l)}draw(r){const l=es(r),{drawLines:h,drawPoints:d,lineWidth:f,lineColor:g,pointSize:S,pointColor:L}=this.options;if(h&&this.faceLandmarks instanceof Iu&&(l.strokeStyle=g,l.lineWidth=f,gr(l,this.faceLandmarks.getJawOutline()),gr(l,this.faceLandmarks.getLeftEyeBrow()),gr(l,this.faceLandmarks.getRightEyeBrow()),gr(l,this.faceLandmarks.getNose()),gr(l,this.faceLandmarks.getLeftEye(),!0),gr(l,this.faceLandmarks.getRightEye(),!0),gr(l,this.faceLandmarks.getMouth(),!0)),d){l.strokeStyle=L,l.fillStyle=L;const 
x=A=>{l.beginPath(),l.arc(A.x,A.y,S,0,2*Math.PI),l.fill()};this.faceLandmarks.positions.forEach(x)}}}function FZ(r,l){const h=Array.isArray(l)?l:[l];h.forEach(d=>{const f=d instanceof qs?d:ga(d)?d.landmarks:void 0;if(!f)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new sD(f).draw(r)})}const hx={};Tc(hx,{AnchorPosition:()=>Hi,DrawBox:()=>ZS,DrawBoxOptions:()=>l2,DrawFaceLandmarks:()=>sD,DrawFaceLandmarksOptions:()=>nD,DrawTextField:()=>Rc,DrawTextFieldOptions:()=>Wm,drawContour:()=>gr,drawDetections:()=>GX,drawFaceExpressions:()=>kZ,drawFaceLandmarks:()=>FZ});function _Z(r,l){const h=qc(r,l),d=jc(r,l);function f(S,L,x){const A=d(S,L,`${x}/separable_conv0`),O=d(L,L,`${x}/separable_conv1`),C=h(S,L,1,`${x}/expansion_conv`);return{separable_conv0:A,separable_conv1:O,expansion_conv:C}}function g(S,L){const x=d(S,S,`${L}/separable_conv0`),A=d(S,S,`${L}/separable_conv1`),O=d(S,S,`${L}/separable_conv2`);return{separable_conv0:x,separable_conv1:A,separable_conv2:O}}return{extractConvParams:h,extractSeparableConvParams:d,extractReductionBlockParams:f,extractMainBlockParams:g}}function iD(r,l){const h=[],{extractWeights:d,getRemainingWeights:f}=Hn(r),{extractConvParams:g,extractSeparableConvParams:S,extractReductionBlockParams:L,extractMainBlockParams:x}=_Z(d,h),A=g(3,32,3,"entry_flow/conv_in"),O=L(32,64,"entry_flow/reduction_block_0"),C=L(64,128,"entry_flow/reduction_block_1"),$={conv_in:A,reduction_block_0:O,reduction_block_1:C},z={};zi(l,0,1).forEach(fe=>{z[`main_block_${fe}`]=x(128,`middle_flow/main_block_${fe}`)});const ne=L(128,256,"exit_flow/reduction_block"),te=S(256,512,"exit_flow/separable_conv"),se={reduction_block:ne,separable_conv:te};if(f().length!==0)throw new Error(`weights remaing after extract: ${f().length}`);return{paramMappings:h,params:{entry_flow:$,middle_flow:z,exit_flow:se}}}function WZ(r,l){const h=ys(r,l),d=ug(h),f=Kc(h);function g(L){const x=f(`${L}/separable_conv0`),A=f(`${L}/separable_conv1`),O=d(`${L}/expansion_conv`);return{separable_conv0:x,separable_conv1:A,expansion_conv:O}}function S(L){const x=f(`${L}/separable_conv0`),A=f(`${L}/separable_conv1`),O=f(`${L}/separable_conv2`);return{separable_conv0:x,separable_conv1:A,separable_conv2:O}}return{extractConvParams:d,extractSeparableConvParams:f,extractReductionBlockParams:g,extractMainBlockParams:S}}function rD(r,l){const h=[],{extractConvParams:d,extractSeparableConvParams:f,extractReductionBlockParams:g,extractMainBlockParams:S}=WZ(r,h),L=d("entry_flow/conv_in"),x=g("entry_flow/reduction_block_0"),A=g("entry_flow/reduction_block_1"),O={conv_in:L,reduction_block_0:x,reduction_block_1:A},C={};zi(l,0,1).forEach(te=>{C[`main_block_${te}`]=S(`middle_flow/main_block_${te}`)});const $=g("exit_flow/reduction_block"),z=f("exit_flow/separable_conv"),ne={reduction_block:$,separable_conv:z};return Vn(r,h),{params:{entry_flow:O,middle_flow:C,exit_flow:ne},paramMappings:h}}const pn=Ye(Je());function oD(r,l,h){return pn.add(pn.conv2d(r,l.filters,h,"same"),l.bias)}function ux(r,l,h=!0){let d=h?pn.relu(r):r;return d=ns(d,l.separable_conv0,[1,1]),d=ns(pn.relu(d),l.separable_conv1,[1,1]),d=pn.maxPool(d,[3,3],[2,2],"same"),d=pn.add(d,oD(r,l.expansion_conv,[2,2])),d}function $Z(r,l){let h=ns(pn.relu(r),l.separable_conv0,[1,1]);return h=ns(pn.relu(h),l.separable_conv1,[1,1]),h=ns(pn.relu(h),l.separable_conv2,[1,1]),h=pn.add(h,r),h}class aD extends kn{constructor(r){super("TinyXception");this._numMainBlocks=r}forwardInput(r){const{params:l}=this;if(!l)throw new 
Error("TinyXception - load model before inference");return pn.tidy(()=>{const h=r.toBatchTensor(112,!0),d=[122.782,117.001,104.298],f=yi(h,d).div(pn.scalar(256));let g=pn.relu(oD(f,l.entry_flow.conv_in,[2,2]));return g=ux(g,l.entry_flow.reduction_block_0,!1),g=ux(g,l.entry_flow.reduction_block_1),zi(this._numMainBlocks,0,1).forEach(S=>{g=$Z(g,l.middle_flow[`main_block_${S}`])}),g=ux(g,l.exit_flow.reduction_block),g=pn.relu(ns(g,l.exit_flow.separable_conv,[1,1])),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(r){return rD(r,this._numMainBlocks)}extractParams(r){return iD(r,this._numMainBlocks)}}function cD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),f=lg(h,l),g=f(512,1,"fc/age"),S=f(512,2,"fc/gender");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{fc:{age:g,gender:S}}}}function lD(r){const l=[],h=ys(r,l);function d(g){const S=h(`${g}/weights`,2),L=h(`${g}/bias`,1);return{weights:S,bias:L}}const f={fc:{age:d("fc/age"),gender:d("fc/gender")}};return Vn(r,l),{params:f,paramMappings:l}}var Tr;(function(r){r.FEMALE="female",r.MALE="male"})(Tr||(Tr={}));const Xi=Ye(Je());class dx extends kn{constructor(r=new aD(2)){super("AgeGenderNet");this._faceFeatureExtractor=r}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return Xi.tidy(()=>{const h=r instanceof po?this.faceFeatureExtractor.forwardInput(r):r,d=Xi.avgPool(h,[7,7],[2,2],"valid").as2D(h.shape[0],-1),f=zu(d,l.fc.age).as1D(),g=zu(d,l.fc.gender);return{age:f,gender:g}})}forwardInput(r){return Xi.tidy(()=>{const{age:l,gender:h}=this.runNet(r);return{age:l,gender:Xi.softmax(h)}})}async forward(r){return this.forwardInput(await Rt(r))}async predictAgeAndGender(r){const l=await Rt(r),h=await this.forwardInput(l),d=Xi.unstack(h.age),f=Xi.unstack(h.gender),g=d.map((L,x)=>({ageTensor:L,genderTensor:f[x]})),S=await Promise.all(g.map(async({ageTensor:L,genderTensor:x})=>{const A=(await L.data())[0],O=(await x.data())[0],C=O>.5,$=C?Tr.MALE:Tr.FEMALE,z=C?O:1-O;return L.dispose(),x.dispose(),{age:A,gender:$,genderProbability:z}}));return h.age.dispose(),h.gender.dispose(),l.isBatchInput?S:S[0]}getDefaultModelName(){return"age_gender_model"}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:h}=this.extractClassifierParams(r);this._params=l,this._paramMappings=h}extractClassifierParams(r){return cD(r)}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:h}=mg(r);return this.faceFeatureExtractor.loadFromWeightMap(l),lD(h)}extractParams(r){const l=512*1+1+(512*2+2),h=r.slice(0,r.length-l),d=r.slice(r.length-l);return this.faceFeatureExtractor.extractWeights(h),this.extractClassifierParams(d)}}const bs=Ye(Je());class yg extends fg{postProcess(r,l,h){const d=h.map(({width:g,height:S})=>{const L=l/Math.max(S,g);return{width:g*L,height:S*L}}),f=d.length;return bs.tidy(()=>{const g=(O,C)=>bs.stack([bs.fill([68],O),bs.fill([68],C)],1).as2D(1,136).as1D(),S=(O,C)=>{const{width:$,height:z}=d[O];return C($,z)?Math.abs($-z)/2:0},L=O=>S(O,(C,$)=>C<$),x=O=>S(O,(C,$)=>$g(L(C),x(C))))).div(bs.stack(Array.from(Array(f),(O,C)=>g(d[C].width,d[C].height))));return A})}forwardInput(r){return bs.tidy(()=>{const l=this.runNet(r);return 
this.postProcess(l,r.inputSize,r.inputDimensions.map(([h,d])=>({height:h,width:d})))})}async forward(r){return this.forwardInput(await Rt(r))}async detectLandmarks(r){const l=await Rt(r),h=bs.tidy(()=>bs.unstack(this.forwardInput(l))),d=await Promise.all(h.map(async(f,g)=>{const S=Array.from(await f.data()),L=S.filter((A,O)=>Em(O)),x=S.filter((A,O)=>!Em(O));return new Iu(Array(68).fill(0).map((A,O)=>new Ze(L[O],x[O])),{height:l.getInputHeight(g),width:l.getInputWidth(g)})}));return h.forEach(f=>f.dispose()),l.isBatchInput?d:d[0]}getClassifierChannelsOut(){return 136}}class Gu extends yg{constructor(r=new pg){super("FaceLandmark68Net",r)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}}function hD(r){const l=[],{extractDenseBlock3Params:h}=dg(r,l),d={dense0:h("dense0",!0),dense1:h("dense1"),dense2:h("dense2")};return Vn(r,l),{params:d,paramMappings:l}}function uD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractDenseBlock3Params:f}=hg(h,l),g=f(3,32,"dense0",!0),S=f(32,64,"dense1"),L=f(64,128,"dense2");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{dense0:g,dense1:S,dense2:L}}}const el=Ye(Je());class dD extends kn{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyFaceFeatureExtractor - load model before inference");return el.tidy(()=>{const h=r.toBatchTensor(112,!0),d=[122.782,117.001,104.298],f=yi(h,d).div(el.scalar(255));let g=og(f,l.dense0,!0);return g=og(g,l.dense1),g=og(g,l.dense2),g=el.avgPool(g,[14,14],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(r){return hD(r)}extractParams(r){return uD(r)}}class px extends yg{constructor(r=new dD){super("FaceLandmark68TinyNet",r)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}}class UZ extends Gu{}const bg=Ye(Je());function pD(r,l){return bg.add(bg.mul(r,l.weights),l.biases)}const tl=Ye(Je());function mx(r,l,h,d,f="same"){const{filters:g,bias:S}=l.conv;let L=tl.conv2d(r,g,h,f);return L=tl.add(L,S),L=pD(L,l.scale),d?tl.relu(L):L}function mD(r,l){return mx(r,l,[1,1],!0)}function fx(r,l){return mx(r,l,[1,1],!1)}function wg(r,l){return mx(r,l,[2,2],!0,"valid")}const ws=Ye(Je());function BZ(r,l){function h(L,x,A){const O=r(L),C=O.length/(x*A*A);if(MS(C))throw new Error(`depth has to be an integer: ${C}, weights.length: ${O.length}, numFilters: ${x}, filterSize: ${A}`);return ws.tidy(()=>ws.transpose(ws.tensor4d(O,[x,C,A,A]),[2,3,1,0]))}function d(L,x,A,O){const C=h(L,x,A),$=ws.tensor1d(r(x));return l.push({paramPath:`${O}/filters`},{paramPath:`${O}/bias`}),{filters:C,bias:$}}function f(L,x){const A=ws.tensor1d(r(L)),O=ws.tensor1d(r(L));return l.push({paramPath:`${x}/weights`},{paramPath:`${x}/biases`}),{weights:A,biases:O}}function g(L,x,A,O){const C=d(L,x,A,`${O}/conv`),$=f(x,`${O}/scale`);return{conv:C,scale:$}}function S(L,x,A,O,C=!1){const $=g((C?.5:1)*L,x,A,`${O}/conv1`),z=g(L,x,A,`${O}/conv2`);return{conv1:$,conv2:z}}return{extractConvLayerParams:g,extractResidualLayerParams:S}}function 
fD(r){const{extractWeights:l,getRemainingWeights:h}=Hn(r),d=[],{extractConvLayerParams:f,extractResidualLayerParams:g}=BZ(l,d),S=f(4704,32,7,"conv32_down"),L=g(9216,32,3,"conv32_1"),x=g(9216,32,3,"conv32_2"),A=g(9216,32,3,"conv32_3"),O=g(36864,64,3,"conv64_down",!0),C=g(36864,64,3,"conv64_1"),$=g(36864,64,3,"conv64_2"),z=g(36864,64,3,"conv64_3"),ne=g(147456,128,3,"conv128_down",!0),te=g(147456,128,3,"conv128_1"),se=g(147456,128,3,"conv128_2"),fe=g(589824,256,3,"conv256_down",!0),de=g(589824,256,3,"conv256_1"),Ae=g(589824,256,3,"conv256_2"),xe=g(589824,256,3,"conv256_down_out"),Me=ws.tidy(()=>ws.transpose(ws.tensor2d(l(256*128),[128,256]),[1,0]));if(d.push({paramPath:"fc"}),h().length!==0)throw new Error(`weights remaing after extract: ${h().length}`);const Ke={conv32_down:S,conv32_1:L,conv32_2:x,conv32_3:A,conv64_down:O,conv64_1:C,conv64_2:$,conv64_3:z,conv128_down:ne,conv128_1:te,conv128_2:se,conv256_down:fe,conv256_1:de,conv256_2:Ae,conv256_down_out:xe,fc:Me};return{params:Ke,paramMappings:d}}function MZ(r,l){const h=ys(r,l);function d(S){const L=h(`${S}/scale/weights`,1),x=h(`${S}/scale/biases`,1);return{weights:L,biases:x}}function f(S){const L=h(`${S}/conv/filters`,4),x=h(`${S}/conv/bias`,1),A=d(S);return{conv:{filters:L,bias:x},scale:A}}function g(S){return{conv1:f(`${S}/conv1`),conv2:f(`${S}/conv2`)}}return{extractConvLayerParams:f,extractResidualLayerParams:g}}function gD(r){const l=[],{extractConvLayerParams:h,extractResidualLayerParams:d}=MZ(r,l),f=h("conv32_down"),g=d("conv32_1"),S=d("conv32_2"),L=d("conv32_3"),x=d("conv64_down"),A=d("conv64_1"),O=d("conv64_2"),C=d("conv64_3"),$=d("conv128_down"),z=d("conv128_1"),ne=d("conv128_2"),te=d("conv256_down"),se=d("conv256_1"),fe=d("conv256_2"),de=d("conv256_down_out"),Ae=r.fc;if(l.push({originalPath:"fc",paramPath:"fc"}),!BS(Ae))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${Ae}`);const xe={conv32_down:f,conv32_1:g,conv32_2:S,conv32_3:L,conv64_down:x,conv64_1:A,conv64_2:O,conv64_3:C,conv128_down:$,conv128_1:z,conv128_2:ne,conv256_down:te,conv256_1:se,conv256_2:fe,conv256_down_out:de,fc:Ae};return Vn(r,l),{params:xe,paramMappings:l}}const Yn=Ye(Je());function Li(r,l){let h=mD(r,l.conv1);return h=fx(h,l.conv2),h=Yn.add(h,r),h=Yn.relu(h),h}function Vu(r,l){let h=wg(r,l.conv1);h=fx(h,l.conv2);let d=Yn.avgPool(r,2,2,"valid");const f=Yn.zeros(d.shape),g=d.shape[3]!==h.shape[3],S=d.shape[1]!==h.shape[1]||d.shape[2]!==h.shape[2];if(S){const L=[...h.shape];L[1]=1;const x=Yn.zeros(L);h=Yn.concat([h,x],1);const A=[...h.shape];A[2]=1;const O=Yn.zeros(A);h=Yn.concat([h,O],2)}return d=g?Yn.concat([d,f],3):d,h=Yn.add(d,h),h=Yn.relu(h),h}const _s=Ye(Je());class Hu extends kn{constructor(){super("FaceRecognitionNet")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceRecognitionNet - load model before inference");return _s.tidy(()=>{const h=_s.cast(r.toBatchTensor(150,!0),"float32"),d=[122.782,117.001,104.298],f=yi(h,d).div(_s.scalar(256));let g=wg(f,l.conv32_down);g=_s.maxPool(g,3,2,"valid"),g=Li(g,l.conv32_1),g=Li(g,l.conv32_2),g=Li(g,l.conv32_3),g=Vu(g,l.conv64_down),g=Li(g,l.conv64_1),g=Li(g,l.conv64_2),g=Li(g,l.conv64_3),g=Vu(g,l.conv128_down),g=Li(g,l.conv128_1),g=Li(g,l.conv128_2),g=Vu(g,l.conv256_down),g=Li(g,l.conv256_1),g=Li(g,l.conv256_2),g=Vu(g,l.conv256_down_out);const S=g.mean([1,2]),L=_s.matMul(S,l.fc);return L})}async forward(r){return this.forwardInput(await Rt(r))}async computeFaceDescriptor(r){const l=await Rt(r),h=_s.tidy(()=>_s.unstack(this.forwardInput(l))),d=await 
Promise.all(h.map(f=>f.data()));return h.forEach(f=>f.dispose()),l.isBatchInput?d:d[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(r){return gD(r)}extractParams(r){return fD(r)}}function PZ(r){const l=new Hu;return l.extractWeights(r),l}function Lg(r,l){const h={descriptor:l};return Object.assign({},r,h)}function zZ(r){return typeof r.age=="number"}function Sg(r,l){const h={age:l};return Object.assign({},r,h)}function GZ(r){return(r.gender===Tr.MALE||r.gender===Tr.FEMALE)&&Nc(r.genderProbability)}function Ig(r,l,h){const d={gender:l,genderProbability:h};return Object.assign({},r,d)}const Si=Ye(Je());function VZ(r,l){function h(x,A){const O=Si.tensor4d(r(3*3*x),[3,3,x,1]),C=Si.tensor1d(r(x)),$=Si.tensor1d(r(x)),z=Si.tensor1d(r(x)),ne=Si.tensor1d(r(x));return l.push({paramPath:`${A}/filters`},{paramPath:`${A}/batch_norm_scale`},{paramPath:`${A}/batch_norm_offset`},{paramPath:`${A}/batch_norm_mean`},{paramPath:`${A}/batch_norm_variance`}),{filters:O,batch_norm_scale:C,batch_norm_offset:$,batch_norm_mean:z,batch_norm_variance:ne}}function d(x,A,O,C,$){const z=Si.tensor4d(r(x*A*O*O),[O,O,x,A]),ne=Si.tensor1d(r(A));return l.push({paramPath:`${C}/filters`},{paramPath:`${C}/${$?"batch_norm_offset":"bias"}`}),{filters:z,bias:ne}}function f(x,A,O,C){const{filters:$,bias:z}=d(x,A,O,C,!0);return{filters:$,batch_norm_offset:z}}function g(x,A,O){const C=h(x,`${O}/depthwise_conv`),$=f(x,A,1,`${O}/pointwise_conv`);return{depthwise_conv:C,pointwise_conv:$}}function S(){const x=f(3,32,3,"mobilenetv1/conv_0"),A=g(32,64,"mobilenetv1/conv_1"),O=g(64,128,"mobilenetv1/conv_2"),C=g(128,128,"mobilenetv1/conv_3"),$=g(128,256,"mobilenetv1/conv_4"),z=g(256,256,"mobilenetv1/conv_5"),ne=g(256,512,"mobilenetv1/conv_6"),te=g(512,512,"mobilenetv1/conv_7"),se=g(512,512,"mobilenetv1/conv_8"),fe=g(512,512,"mobilenetv1/conv_9"),de=g(512,512,"mobilenetv1/conv_10"),Ae=g(512,512,"mobilenetv1/conv_11"),xe=g(512,1024,"mobilenetv1/conv_12"),Me=g(1024,1024,"mobilenetv1/conv_13");return{conv_0:x,conv_1:A,conv_2:O,conv_3:C,conv_4:$,conv_5:z,conv_6:ne,conv_7:te,conv_8:se,conv_9:fe,conv_10:de,conv_11:Ae,conv_12:xe,conv_13:Me}}function L(){const 
x=f(1024,256,1,"prediction_layer/conv_0"),A=f(256,512,3,"prediction_layer/conv_1"),O=f(512,128,1,"prediction_layer/conv_2"),C=f(128,256,3,"prediction_layer/conv_3"),$=f(256,128,1,"prediction_layer/conv_4"),z=f(128,256,3,"prediction_layer/conv_5"),ne=f(256,64,1,"prediction_layer/conv_6"),te=f(64,128,3,"prediction_layer/conv_7"),se=d(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),fe=d(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),de=d(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),Ae=d(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),xe=d(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Me=d(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),Ke=d(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),wt=d(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),$t=d(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Kt=d(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),Fn=d(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),vn=d(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),Nn={box_encoding_predictor:se,class_predictor:fe},Qs={box_encoding_predictor:de,class_predictor:Ae},Ai={box_encoding_predictor:xe,class_predictor:Me},ei={box_encoding_predictor:Ke,class_predictor:wt},Sa={box_encoding_predictor:$t,class_predictor:Kt},hl={box_encoding_predictor:Fn,class_predictor:vn};return{conv_0:x,conv_1:A,conv_2:O,conv_3:C,conv_4:$,conv_5:z,conv_6:ne,conv_7:te,box_predictor_0:Nn,box_predictor_1:Qs,box_predictor_2:Ai,box_predictor_3:ei,box_predictor_4:Sa,box_predictor_5:hl}}return{extractMobilenetV1Params:S,extractPredictionLayerParams:L}}function yD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractMobilenetV1Params:f,extractPredictionLayerParams:g}=VZ(h,l),S=f(),L=g(),x=Si.tensor3d(h(5118*4),[1,5118,4]),A={extra_dim:x};if(l.push({paramPath:"output_layer/extra_dim"}),d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{params:{mobilenetv1:S,prediction_layer:L,output_layer:A},paramMappings:l}}function HZ(r,l){const h=ys(r,l);function d(A,O,C){const $=h(`${A}/Conv2d_${O}_pointwise/weights`,4,`${C}/filters`),z=h(`${A}/Conv2d_${O}_pointwise/convolution_bn_offset`,1,`${C}/batch_norm_offset`);return{filters:$,batch_norm_offset:z}}function f(A){const O=`mobilenetv1/conv_${A}`,C=`MobilenetV1/Conv2d_${A}_depthwise`,$=`${O}/depthwise_conv`,z=`${O}/pointwise_conv`,ne=h(`${C}/depthwise_weights`,4,`${$}/filters`),te=h(`${C}/BatchNorm/gamma`,1,`${$}/batch_norm_scale`),se=h(`${C}/BatchNorm/beta`,1,`${$}/batch_norm_offset`),fe=h(`${C}/BatchNorm/moving_mean`,1,`${$}/batch_norm_mean`),de=h(`${C}/BatchNorm/moving_variance`,1,`${$}/batch_norm_variance`);return{depthwise_conv:{filters:ne,batch_norm_scale:te,batch_norm_offset:se,batch_norm_mean:fe,batch_norm_variance:de},pointwise_conv:d("MobilenetV1",A,z)}}function g(){return{conv_0:d("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:f(1),conv_2:f(2),conv_3:f(3),conv_4:f(4),conv_5:f(5),conv_6:f(6),conv_7:f(7),conv_8:f(8),conv_9:f(9),conv_10:f(10),conv_11:f(11),conv_12:f(12),conv_13:f(13)}}function S(A,O){const C=h(`${A}/weights`,4,`${O}/filters`),$=h(`${A}/biases`,1,`${O}/bias`);return{filters:C,bias:$}}function L(A){const 
O=S(`Prediction/BoxPredictor_${A}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${A}/box_encoding_predictor`),C=S(`Prediction/BoxPredictor_${A}/ClassPredictor`,`prediction_layer/box_predictor_${A}/class_predictor`);return{box_encoding_predictor:O,class_predictor:C}}function x(){return{conv_0:d("Prediction",0,"prediction_layer/conv_0"),conv_1:d("Prediction",1,"prediction_layer/conv_1"),conv_2:d("Prediction",2,"prediction_layer/conv_2"),conv_3:d("Prediction",3,"prediction_layer/conv_3"),conv_4:d("Prediction",4,"prediction_layer/conv_4"),conv_5:d("Prediction",5,"prediction_layer/conv_5"),conv_6:d("Prediction",6,"prediction_layer/conv_6"),conv_7:d("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:L(0),box_predictor_1:L(1),box_predictor_2:L(2),box_predictor_3:L(3),box_predictor_4:L(4),box_predictor_5:L(5)}}return{extractMobilenetV1Params:g,extractPredictionLayerParams:x}}function bD(r){const l=[],{extractMobilenetV1Params:h,extractPredictionLayerParams:d}=HZ(r,l),f=r["Output/extra_dim"];if(l.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!yr(f))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${f}`);const g={mobilenetv1:h(),prediction_layer:d(),output_layer:{extra_dim:f}};return Vn(r,l),{params:g,paramMappings:l}}const fo=Ye(Je());function Xs(r,l,h){return fo.tidy(()=>{let d=fo.conv2d(r,l.filters,h,"same");return d=fo.add(d,l.batch_norm_offset),fo.clipByValue(d,0,6)})}const Ar=Ye(Je()),YZ=.0010000000474974513;function qZ(r,l,h){return Ar.tidy(()=>{let d=Ar.depthwiseConv2d(r,l.filters,h,"same");return d=Ar.batchNorm(d,l.batch_norm_mean,l.batch_norm_variance,l.batch_norm_offset,l.batch_norm_scale,YZ),Ar.clipByValue(d,0,6)})}function jZ(r){return[2,4,6,12].some(l=>l===r)?[2,2]:[1,1]}function wD(r,l){return Ar.tidy(()=>{let h,d=Xs(r,l.conv_0,[2,2]);const f=[l.conv_1,l.conv_2,l.conv_3,l.conv_4,l.conv_5,l.conv_6,l.conv_7,l.conv_8,l.conv_9,l.conv_10,l.conv_11,l.conv_12,l.conv_13];if(f.forEach((g,S)=>{const L=S+1,x=jZ(L);d=qZ(d,g.depthwise_conv,x),d=Xs(d,g.pointwise_conv,[1,1]),L===11&&(h=d)}),h===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:d,conv11:h}})}function LD(r,l,h,d,f){const g=r.shape[0],S=Math.min(h,g),L=l.map((O,C)=>({score:O,boxIndex:C})).filter(O=>O.score>f).sort((O,C)=>C.score-O.score),x=O=>O<=d?1:0,A=[];return L.forEach(O=>{if(A.length>=S)return;const C=O.score;for(let $=A.length-1;$>=0;--$){const z=KZ(r,O.boxIndex,A[$]);if(z===0)continue;if(O.score*=x(z),O.score<=f)break}C===O.score&&A.push(O.boxIndex)}),A}function KZ(r,l,h){const d=r.arraySync(),f=Math.min(d[l][0],d[l][2]),g=Math.min(d[l][1],d[l][3]),S=Math.max(d[l][0],d[l][2]),L=Math.max(d[l][1],d[l][3]),x=Math.min(d[h][0],d[h][2]),A=Math.min(d[h][1],d[h][3]),O=Math.max(d[h][0],d[h][2]),C=Math.max(d[h][1],d[h][3]),$=(S-f)*(L-g),z=(O-x)*(C-A);if($<=0||z<=0)return 0;const ne=Math.max(f,x),te=Math.max(g,A),se=Math.min(S,O),fe=Math.min(L,C),de=Math.max(se-ne,0)*Math.max(fe-te,0);return de/($+z-de)}const De=Ye(Je());function XZ(r){const l=De.unstack(De.transpose(r,[1,0])),h=[De.sub(l[2],l[0]),De.sub(l[3],l[1])],d=[De.add(l[0],De.div(h[0],De.scalar(2))),De.add(l[1],De.div(h[1],De.scalar(2)))];return{sizes:h,centers:d}}function 
JZ(r,l){const{sizes:h,centers:d}=XZ(r),f=De.unstack(De.transpose(l,[1,0])),g=De.div(De.mul(De.exp(De.div(f[2],De.scalar(5))),h[0]),De.scalar(2)),S=De.add(De.mul(De.div(f[0],De.scalar(10)),h[0]),d[0]),L=De.div(De.mul(De.exp(De.div(f[3],De.scalar(5))),h[1]),De.scalar(2)),x=De.add(De.mul(De.div(f[1],De.scalar(10)),h[1]),d[1]);return De.transpose(De.stack([De.sub(S,g),De.sub(x,L),De.add(S,g),De.add(x,L)]),[1,0])}function SD(r,l,h){return De.tidy(()=>{const d=r.shape[0];let f=JZ(De.reshape(De.tile(h.extra_dim,[d,1,1]),[-1,4]),De.reshape(r,[-1,4]));f=De.reshape(f,[d,f.shape[0]/d,4]);const g=De.sigmoid(De.slice(l,[0,0,1],[-1,-1,-1]));let S=De.slice(g,[0,0,0],[-1,-1,1]);S=De.reshape(S,[d,S.shape[1]]);const L=De.unstack(f),x=De.unstack(S);return{boxes:L,scores:x}})}const Yu=Ye(Je());function ya(r,l){return Yu.tidy(()=>{const h=r.shape[0],d=Yu.reshape(ma(r,l.box_encoding_predictor),[h,-1,1,4]),f=Yu.reshape(ma(r,l.class_predictor),[h,-1,3]);return{boxPredictionEncoding:d,classPrediction:f}})}const qu=Ye(Je());function ID(r,l,h){return qu.tidy(()=>{const d=Xs(r,h.conv_0,[1,1]),f=Xs(d,h.conv_1,[2,2]),g=Xs(f,h.conv_2,[1,1]),S=Xs(g,h.conv_3,[2,2]),L=Xs(S,h.conv_4,[1,1]),x=Xs(L,h.conv_5,[2,2]),A=Xs(x,h.conv_6,[1,1]),O=Xs(A,h.conv_7,[2,2]),C=ya(l,h.box_predictor_0),$=ya(r,h.box_predictor_1),z=ya(f,h.box_predictor_2),ne=ya(S,h.box_predictor_3),te=ya(x,h.box_predictor_4),se=ya(O,h.box_predictor_5),fe=qu.concat([C.boxPredictionEncoding,$.boxPredictionEncoding,z.boxPredictionEncoding,ne.boxPredictionEncoding,te.boxPredictionEncoding,se.boxPredictionEncoding],1),de=qu.concat([C.classPrediction,$.classPrediction,z.classPrediction,ne.classPrediction,te.classPrediction,se.classPrediction],1);return{boxPredictions:fe,classPredictions:de}})}class Ii{constructor({minConfidence:r,maxResults:l}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=r||.5,this._maxResults=l||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}}const xi=Ye(Je());class nl extends kn{constructor(){super("SsdMobilenetv1")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("SsdMobilenetv1 - load model before inference");return xi.tidy(()=>{const h=xi.cast(r.toBatchTensor(512,!1),"float32"),d=xi.sub(xi.mul(h,xi.scalar(.007843137718737125)),xi.scalar(1)),f=wD(d,l.mobilenetv1),{boxPredictions:g,classPredictions:S}=ID(f.out,f.conv11,l.prediction_layer);return SD(g,S,l.output_layer)})}async forward(r){return this.forwardInput(await Rt(r))}async locateFaces(r,l={}){const{maxResults:h,minConfidence:d}=new Ii(l),f=await Rt(r),{boxes:g,scores:S}=this.forwardInput(f),L=g[0],x=S[0];for(let de=1;de{const[Ae,xe]=[Math.max(0,se[de][0]),Math.min(1,se[de][2])].map(wt=>wt*te),[Me,Ke]=[Math.max(0,se[de][1]),Math.min(1,se[de][3])].map(wt=>wt*ne);return new Yt(A[de],new Su(Me,Ae,Ke-Me,xe-Ae),{height:f.getInputHeight(0),width:f.getInputWidth(0)})});return L.dispose(),x.dispose(),fe}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(r){return bD(r)}extractParams(r){return yD(r)}}function xD(r){const l=new nl;return l.extractWeights(r),l}function ZZ(r){return xD(r)}class QZ extends nl{}const TD=.4,AD=[new Ze(.738768,.874946),new Ze(2.42204,2.65704),new Ze(4.30971,7.04493),new Ze(10.246,4.59428),new 
Ze(12.6868,11.8741)],vD=[new Ze(1.603231,2.094468),new Ze(6.041143,7.080126),new Ze(2.882459,3.518061),new Ze(4.266906,5.178857),new Ze(9.041765,10.66308)],ND=[117.001,114.697,97.404],CD="tiny_yolov2_model",RD="tiny_yolov2_separable_conv_model";const xg=r=>typeof r=="number";function gx(r){if(!r)throw new Error(`invalid config: ${r}`);if(typeof r.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${r.withSeparableConvs}`);if(!xg(r.iouThreshold)||r.iouThreshold<0||r.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${r.iouThreshold}`);if(!Array.isArray(r.classes)||!r.classes.length||!r.classes.every(l=>typeof l=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(r.classes)}`);if(!Array.isArray(r.anchors)||!r.anchors.length||!r.anchors.map(l=>l||{}).every(l=>xg(l.x)&&xg(l.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(r.anchors)}`);if(r.meanRgb&&(!Array.isArray(r.meanRgb)||r.meanRgb.length!==3||!r.meanRgb.every(xg)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(r.meanRgb)}`)}const Js=Ye(Je());function sl(r){return Js.tidy(()=>{const l=Js.mul(r,Js.scalar(.10000000149011612));return Js.add(Js.relu(Js.sub(r,l)),l)})}const Zs=Ye(Je());function vr(r,l){return Zs.tidy(()=>{let h=Zs.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return h=Zs.conv2d(h,l.conv.filters,[1,1],"valid"),h=Zs.sub(h,l.bn.sub),h=Zs.mul(h,l.bn.truediv),h=Zs.add(h,l.conv.bias),sl(h)})}const go=Ye(Je());function Nr(r,l){return go.tidy(()=>{let h=go.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return h=go.separableConv2d(h,l.depthwise_filter,l.pointwise_filter,[1,1],"valid"),h=go.add(h,l.bias),sl(h)})}const yx=Ye(Je());function eQ(r,l){const h=qc(r,l);function d(S,L){const x=yx.tensor1d(r(S)),A=yx.tensor1d(r(S));return l.push({paramPath:`${L}/sub`},{paramPath:`${L}/truediv`}),{sub:x,truediv:A}}function f(S,L,x){const A=h(S,L,3,`${x}/conv`),O=d(L,`${x}/bn`);return{conv:A,bn:O}}const g=jc(r,l);return{extractConvParams:h,extractConvWithBatchNormParams:f,extractSeparableConvParams:g}}function OD(r,l,h,d){const{extractWeights:f,getRemainingWeights:g}=Hn(r),S=[],{extractConvParams:L,extractConvWithBatchNormParams:x,extractSeparableConvParams:A}=eQ(f,S);let O;if(l.withSeparableConvs){const[C,$,z,ne,te,se,fe,de,Ae]=d,xe=l.isFirstLayerConv2d?L(C,$,3,"conv0"):A(C,$,"conv0"),Me=A($,z,"conv1"),Ke=A(z,ne,"conv2"),wt=A(ne,te,"conv3"),$t=A(te,se,"conv4"),Kt=A(se,fe,"conv5"),Fn=de?A(fe,de,"conv6"):void 0,vn=Ae?A(de,Ae,"conv7"):void 0,Nn=L(Ae||de||fe,5*h,1,"conv8");O={conv0:xe,conv1:Me,conv2:Ke,conv3:wt,conv4:$t,conv5:Kt,conv6:Fn,conv7:vn,conv8:Nn}}else{const[C,$,z,ne,te,se,fe,de,Ae]=d,xe=x(C,$,"conv0"),Me=x($,z,"conv1"),Ke=x(z,ne,"conv2"),wt=x(ne,te,"conv3"),$t=x(te,se,"conv4"),Kt=x(se,fe,"conv5"),Fn=x(fe,de,"conv6"),vn=x(de,Ae,"conv7"),Nn=L(Ae,5*h,1,"conv8");O={conv0:xe,conv1:Me,conv2:Ke,conv3:wt,conv4:$t,conv5:Kt,conv6:Fn,conv7:vn,conv8:Nn}}if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{params:O,paramMappings:S}}function tQ(r,l){const h=ys(r,l);function d(L){const x=h(`${L}/sub`,1),A=h(`${L}/truediv`,1);return{sub:x,truediv:A}}function f(L){const x=h(`${L}/filters`,4),A=h(`${L}/bias`,1);return{filters:x,bias:A}}function g(L){const x=f(`${L}/conv`),A=d(`${L}/bn`);return{conv:x,bn:A}}const 
S=Kc(h);return{extractConvParams:f,extractConvWithBatchNormParams:g,extractSeparableConvParams:S}}function ED(r,l){const h=[],{extractConvParams:d,extractConvWithBatchNormParams:f,extractSeparableConvParams:g}=tQ(r,h);let S;if(l.withSeparableConvs){const L=l.filterSizes&&l.filterSizes.length||9;S={conv0:l.isFirstLayerConv2d?d("conv0"):g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:L>7?g("conv6"):void 0,conv7:L>8?g("conv7"):void 0,conv8:d("conv8")}}else S={conv0:f("conv0"),conv1:f("conv1"),conv2:f("conv2"),conv3:f("conv3"),conv4:f("conv4"),conv5:f("conv5"),conv6:f("conv6"),conv7:f("conv7"),conv8:d("conv8")};return Vn(r,h),{params:S,paramMappings:h}}var bx;(function(r){r[r.XS=224]="XS",r[r.SM=320]="SM",r[r.MD=416]="MD",r[r.LG=608]="LG"})(bx||(bx={}));class Cr{constructor({inputSize:r,scoreThreshold:l}={}){this._name="TinyYolov2Options";if(this._inputSize=r||416,this._scoreThreshold=l||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}}const kt=Ye(Je());class il extends kn{constructor(r){super("TinyYolov2");gx(r),this._config=r}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(r,l){let h=vr(r,l.conv0);return h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv1),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv2),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv3),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv4),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv5),h=kt.maxPool(h,[2,2],[1,1],"same"),h=vr(h,l.conv6),h=vr(h,l.conv7),ma(h,l.conv8,"valid",!1)}runMobilenet(r,l){let h=this.config.isFirstLayerConv2d?sl(ma(r,l.conv0,"valid",!1)):Nr(r,l.conv0);return h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv1),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv2),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv3),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv4),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv5),h=kt.maxPool(h,[2,2],[1,1],"same"),h=l.conv6?Nr(h,l.conv6):h,h=l.conv7?Nr(h,l.conv7):h,ma(h,l.conv8,"valid",!1)}forwardInput(r,l){const{params:h}=this;if(!h)throw new Error("TinyYolov2 - load model before inference");return kt.tidy(()=>{let d=kt.cast(r.toBatchTensor(l,!1),"float32");return d=this.config.meanRgb?yi(d,this.config.meanRgb):d,d=d.div(kt.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(d,h):this.runTinyYolov2(d,h)})}async forward(r,l){return await this.forwardInput(await Rt(r),l)}async detect(r,l={}){const{inputSize:h,scoreThreshold:d}=new Cr(l),f=await Rt(r),g=await this.forwardInput(f,h),S=kt.tidy(()=>kt.unstack(g)[0].expandDims()),L={width:f.getInputWidth(0),height:f.getInputHeight(0)},x=await this.extractBoxes(S,f.getReshapedInputDimensions(0),d);g.dispose(),S.dispose();const A=x.map(te=>te.box),O=x.map(te=>te.score),C=x.map(te=>te.classScore),$=x.map(te=>this.config.classes[te.label]),z=VS(A.map(te=>te.rescale(h)),O,this.config.iouThreshold,!0),ne=z.map(te=>new Cc(O[te],C[te],$[te],A[te],L));return ne}getDefaultModelName(){return""}extractParamsFromWeigthMap(r){return 
ED(r,this.config)}extractParams(r){const l=this.config.filterSizes||il.DEFAULT_FILTER_SIZES,h=l?l.length:void 0;if(h!==7&&h!==8&&h!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${h} filterSizes in config`);return OD(r,this.config,this.boxEncodingSize,l)}async extractBoxes(r,l,h){const{width:d,height:f}=l,g=Math.max(d,f),S=g/d,L=g/f,x=r.shape[1],A=this.config.anchors.length,[O,C,$]=kt.tidy(()=>{const se=r.reshape([x,x,A,this.boxEncodingSize]),fe=se.slice([0,0,0,0],[x,x,A,4]),de=se.slice([0,0,0,4],[x,x,A,1]),Ae=this.withClassScores?kt.softmax(se.slice([0,0,0,5],[x,x,A,this.config.classes.length]),3):kt.scalar(0);return[fe,de,Ae]}),z=[],ne=await C.array(),te=await O.array();for(let se=0;seh){const xe=(fe+Lu(te[se][fe][de][0]))/x*S,Me=(se+Lu(te[se][fe][de][1]))/x*L,Ke=Math.exp(te[se][fe][de][2])*this.config.anchors[de].x/x*S,wt=Math.exp(te[se][fe][de][3])*this.config.anchors[de].y/x*L,$t=xe-Ke/2,Kt=Me-wt/2,Fn={row:se,col:fe,anchor:de},{classScore:vn,label:Nn}=this.withClassScores?await this.extractPredictedClass($,Fn):{classScore:1,label:0};z.push({box:new wu($t,Kt,$t+Ke,Kt+wt),score:Ae,classScore:Ae*vn,label:Nn,...Fn})}}return O.dispose(),C.dispose(),$.dispose(),z}async extractPredictedClass(r,l){const{row:h,col:d,anchor:f}=l,g=await r.array();return Array(this.config.classes.length).fill(0).map((S,L)=>g[h][d][f][L]).map((S,L)=>({classScore:S,label:L})).reduce((S,L)=>S.classScore>L.classScore?S:L)}}il.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];class ju extends il{constructor(r=!0){const l=Object.assign({},{withSeparableConvs:r,iouThreshold:TD,classes:["face"]},r?{anchors:vD,meanRgb:ND}:{anchors:AD,withClassScores:!0});super(l)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(r,l){const h=await this.detect(r,l);return h.map(d=>new Yt(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?RD:CD}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}function nQ(r,l=!0){const h=new ju(l);return h.extractWeights(r),h}class wx extends Cr{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}}class Ti{async then(r){return r(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}}const Lx=Ye(Je());async function ba(r,l,h,d,f=({alignedRect:g})=>g){const g=r.map(x=>ga(x)?f(x):x.detection),S=d||(l instanceof Lx.Tensor?await Hc(l,g):await Vc(l,g)),L=await h(S);return S.forEach(x=>x instanceof Lx.Tensor&&x.dispose()),L}async function rl(r,l,h,d,f){return ba([r],l,async g=>h(g[0]),d,f)}const DD=.4,kD=[new Ze(1.603231,2.094468),new Ze(6.041143,7.080126),new Ze(2.882459,3.518061),new Ze(4.266906,5.178857),new Ze(9.041765,10.66308)],FD=[117.001,114.697,97.404];class Ku extends il{constructor(){const r={withSeparableConvs:!0,iouThreshold:DD,classes:["face"],anchors:kD,meanRgb:FD,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(r)}get anchors(){return this.config.anchors}async locateFaces(r,l){const h=await this.detect(r,l);return h.map(d=>new Yt(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}const pt={ssdMobilenetv1:new nl,tinyFaceDetector:new Ku,tinyYolov2:new ju,faceLandmark68Net:new Gu,faceLandmark68TinyNet:new px,faceRecognitionNet:new Hu,faceExpressionNet:new cx,ageGenderNet:new 
dx},_D=(r,l)=>pt.ssdMobilenetv1.locateFaces(r,l),sQ=(r,l)=>pt.tinyFaceDetector.locateFaces(r,l),iQ=(r,l)=>pt.tinyYolov2.locateFaces(r,l),WD=r=>pt.faceLandmark68Net.detectLandmarks(r),rQ=r=>pt.faceLandmark68TinyNet.detectLandmarks(r),oQ=r=>pt.faceRecognitionNet.computeFaceDescriptor(r),aQ=r=>pt.faceExpressionNet.predictExpressions(r),cQ=r=>pt.ageGenderNet.predictAgeAndGender(r),$D=r=>pt.ssdMobilenetv1.load(r),lQ=r=>pt.tinyFaceDetector.load(r),hQ=r=>pt.tinyYolov2.load(r),uQ=r=>pt.faceLandmark68Net.load(r),dQ=r=>pt.faceLandmark68TinyNet.load(r),pQ=r=>pt.faceRecognitionNet.load(r),mQ=r=>pt.faceExpressionNet.load(r),fQ=r=>pt.ageGenderNet.load(r),gQ=$D,yQ=_D,bQ=WD;class UD extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.extractedFaces=h}}class Zu extends UD{async run(){const r=await this.parentTask,l=await ba(r,this.input,async h=>await Promise.all(h.map(d=>pt.faceExpressionNet.predictExpressions(d))),this.extractedFaces);return r.map((h,d)=>gg(h,l[d]))}withAgeAndGender(){return new Xu(this,this.input)}}class Qu extends UD{async run(){const r=await this.parentTask;if(!r)return;const l=await rl(r,this.input,h=>pt.faceExpressionNet.predictExpressions(h),this.extractedFaces);return gg(r,l)}withAgeAndGender(){return new Ju(this,this.input)}}class cl extends Zu{withAgeAndGender(){return new ol(this,this.input)}withFaceDescriptors(){return new wa(this,this.input)}}class ll extends Qu{withAgeAndGender(){return new al(this,this.input)}withFaceDescriptor(){return new La(this,this.input)}}class BD extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.extractedFaces=h}}class Xu extends BD{async run(){const r=await this.parentTask,l=await ba(r,this.input,async h=>await Promise.all(h.map(d=>pt.ageGenderNet.predictAgeAndGender(d))),this.extractedFaces);return r.map((h,d)=>{const{age:f,gender:g,genderProbability:S}=l[d];return Sg(Ig(h,g,S),f)})}withFaceExpressions(){return new Zu(this,this.input)}}class Ju extends BD{async run(){const r=await this.parentTask;if(!r)return;const{age:l,gender:h,genderProbability:d}=await rl(r,this.input,f=>pt.ageGenderNet.predictAgeAndGender(f),this.extractedFaces);return Sg(Ig(r,h,d),l)}withFaceExpressions(){return new Qu(this,this.input)}}class ol extends Xu{withFaceExpressions(){return new cl(this,this.input)}withFaceDescriptors(){return new wa(this,this.input)}}class al extends Ju{withFaceExpressions(){return new ll(this,this.input)}withFaceDescriptor(){return new La(this,this.input)}}class Sx extends Ti{constructor(r,l){super();this.parentTask=r;this.input=l}}class wa extends Sx{async run(){const r=await this.parentTask,l=await ba(r,this.input,h=>Promise.all(h.map(d=>pt.faceRecognitionNet.computeFaceDescriptor(d))),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return l.map((h,d)=>Lg(r[d],h))}withFaceExpressions(){return new cl(this,this.input)}withAgeAndGender(){return new ol(this,this.input)}}class La extends Sx{async run(){const r=await this.parentTask;if(!r)return;const l=await rl(r,this.input,h=>pt.faceRecognitionNet.computeFaceDescriptor(h),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return Lg(r,l)}withFaceExpressions(){return new ll(this,this.input)}withAgeAndGender(){return new al(this,this.input)}}const ed=Ye(Je());class Ix extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.useTinyLandmarkNet=h}get landmarkNet(){return this.useTinyLandmarkNet?pt.faceLandmark68TinyNet:pt.faceLandmark68Net}}class xx extends Ix{async run(){const r=await 
this.parentTask,l=r.map(f=>f.detection),h=this.input instanceof ed.Tensor?await Hc(this.input,l):await Vc(this.input,l),d=await Promise.all(h.map(f=>this.landmarkNet.detectLandmarks(f)));return h.forEach(f=>f instanceof ed.Tensor&&f.dispose()),r.map((f,g)=>Qc(f,d[g]))}withFaceExpressions(){return new cl(this,this.input)}withAgeAndGender(){return new ol(this,this.input)}withFaceDescriptors(){return new wa(this,this.input)}}class Tx extends Ix{async run(){const r=await this.parentTask;if(!r)return;const{detection:l}=r,h=this.input instanceof ed.Tensor?await Hc(this.input,[l]):await Vc(this.input,[l]),d=await this.landmarkNet.detectLandmarks(h[0]);return h.forEach(f=>f instanceof ed.Tensor&&f.dispose()),Qc(r,d)}withFaceExpressions(){return new ll(this,this.input)}withAgeAndGender(){return new al(this,this.input)}withFaceDescriptor(){return new La(this,this.input)}}class Ax extends Ti{constructor(r,l=new Ii){super();this.input=r;this.options=l}}class Tg extends Ax{async run(){const{input:r,options:l}=this,h=l instanceof wx?d=>pt.tinyFaceDetector.locateFaces(d,l):l instanceof Ii?d=>pt.ssdMobilenetv1.locateFaces(d,l):l instanceof Cr?d=>pt.tinyYolov2.locateFaces(d,l):null;if(!h)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return h(r)}runAndExtendWithFaceDetections(){return new Promise(async r=>{const l=await this.run();return r(l.map(h=>ta({},h)))})}withFaceLandmarks(r=!1){return new xx(this.runAndExtendWithFaceDetections(),this.input,r)}withFaceExpressions(){return new Zu(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new Xu(this.runAndExtendWithFaceDetections(),this.input)}}class vx extends Ax{async run(){const r=await new Tg(this.input,this.options);let l=r[0];return r.forEach(h=>{h.score>l.score&&(l=h)}),l}runAndExtendWithFaceDetection(){return new Promise(async r=>{const l=await this.run();return r(l?ta({},l):void 0)})}withFaceLandmarks(r=!1){return new Tx(this.runAndExtendWithFaceDetection(),this.input,r)}withFaceExpressions(){return new Qu(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new Ju(this.runAndExtendWithFaceDetection(),this.input)}}function wQ(r,l=new Ii){return new vx(r,l)}function Ag(r,l=new Ii){return new Tg(r,l)}async function MD(r,l){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await Ag(r,new Ii(l?{minConfidence:l}:{})).withFaceLandmarks().withFaceDescriptors()}async function LQ(r,l={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead"),await Ag(r,new Cr(l)).withFaceLandmarks().withFaceDescriptors()}const SQ=MD;function Nx(r,l){if(r.length!==l.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");const h=Array.from(r),d=Array.from(l);return Math.sqrt(h.map((f,g)=>f-d[g]).reduce((f,g)=>f+Math.pow(g,2),0))}class PD{constructor(r,l=.6){this._distanceThreshold=l;const h=Array.isArray(r)?r:[r];if(!h.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let d=1;const f=()=>`person ${d++}`;this._labeledDescriptors=h.map(g=>{if(g instanceof ea)return g;if(g instanceof Float32Array)return new ea(f(),[g]);if(g.descriptor&&g.descriptor instanceof Float32Array)return new ea(f(),[g.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | 
Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(r,l){return l.map(h=>Nx(h,r)).reduce((h,d)=>h+d,0)/(l.length||1)}matchDescriptor(r){return this.labeledDescriptors.map(({descriptors:l,label:h})=>new km(h,this.computeMeanDistance(r,l))).reduce((l,h)=>l.distancer.toJSON())}}static fromJSON(r){const l=r.labeledDescriptors.map(h=>ea.fromJSON(h));return new PD(l,r.distanceThreshold)}}function IQ(r){const l=new Ku;return l.extractWeights(r),l}function zD(r,l){const{width:h,height:d}=new ms(l.width,l.height);if(h<=0||d<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:h,height:d})}`);if(Array.isArray(r))return r.map(f=>zD(f,{width:h,height:d}));if(ga(r)){const f=r.detection.forSize(h,d),g=r.unshiftedLandmarks.forSize(f.box.width,f.box.height);return Qc(ta(r,f),g)}return Vi(r)?ta(r,r.detection.forSize(h,d)):r instanceof qs||r instanceof Yt?r.forSize(h,d):r}var GD="0.7.4";Tc(exports,{AgeGenderNet:()=>dx,BoundingBox:()=>wu,Box:()=>Ct,ComposableTask:()=>Ti,ComputeAllFaceDescriptorsTask:()=>wa,ComputeFaceDescriptorsTaskBase:()=>Sx,ComputeSingleFaceDescriptorTask:()=>La,DetectAllFaceLandmarksTask:()=>xx,DetectAllFacesTask:()=>Tg,DetectFaceLandmarksTaskBase:()=>Ix,DetectFacesTaskBase:()=>Ax,DetectSingleFaceLandmarksTask:()=>Tx,DetectSingleFaceTask:()=>vx,Dimensions:()=>ms,FACE_EXPRESSION_LABELS:()=>ax,FaceDetection:()=>Yt,FaceDetectionNet:()=>QZ,FaceExpressionNet:()=>cx,FaceExpressions:()=>fa,FaceLandmark68Net:()=>Gu,FaceLandmark68TinyNet:()=>px,FaceLandmarkNet:()=>UZ,FaceLandmarks:()=>qs,FaceLandmarks5:()=>BX,FaceLandmarks68:()=>Iu,FaceMatch:()=>km,FaceMatcher:()=>PD,FaceRecognitionNet:()=>Hu,Gender:()=>Tr,LabeledBox:()=>Fm,LabeledFaceDescriptors:()=>ea,NetInput:()=>po,NeuralNetwork:()=>kn,ObjectDetection:()=>Cc,Point:()=>Ze,PredictedBox:()=>MX,Rect:()=>Su,SsdMobilenetv1:()=>nl,SsdMobilenetv1Options:()=>Ii,TinyFaceDetector:()=>Ku,TinyFaceDetectorOptions:()=>wx,TinyYolov2:()=>ju,TinyYolov2Options:()=>Cr,TinyYolov2SizeType:()=>bx,allFaces:()=>SQ,allFacesSsdMobilenetv1:()=>MD,allFacesTinyYolov2:()=>LQ,awaitMediaLoaded:()=>QS,bufferToImage:()=>eI,computeFaceDescriptor:()=>oQ,createCanvas:()=>Oc,createCanvasFromMedia:()=>Tu,createFaceDetectionNet:()=>ZZ,createFaceRecognitionNet:()=>PZ,createSsdMobilenetv1:()=>xD,createTinyFaceDetector:()=>IQ,createTinyYolov2:()=>nQ,detectAllFaces:()=>Ag,detectFaceLandmarks:()=>WD,detectFaceLandmarksTiny:()=>rQ,detectLandmarks:()=>bQ,detectSingleFace:()=>wQ,draw:()=>hx,env:()=>gt,euclideanDistance:()=>Nx,extendWithAge:()=>Sg,extendWithFaceDescriptor:()=>Lg,extendWithFaceDetection:()=>ta,extendWithFaceExpressions:()=>gg,extendWithFaceLandmarks:()=>Qc,extendWithGender:()=>Ig,extractFaceTensors:()=>Hc,extractFaces:()=>Vc,fetchImage:()=>OZ,fetchJson:()=>ix,fetchNetWeights:()=>EZ,fetchOrThrow:()=>pa,getContext2dOrThrow:()=>es,getMediaDimensions:()=>sa,imageTensorToCanvas:()=>tI,imageToSquare:()=>sx,inverseSigmoid:()=>_X,iou:()=>zS,isMediaElement:()=>Um,isMediaLoaded:()=>xu,isWithAge:()=>zZ,isWithFaceDetection:()=>Vi,isWithFaceExpressions:()=>lx,isWithFaceLandmarks:()=>ga,isWithGender:()=>GZ,loadAgeGenderModel:()=>fQ,loadFaceDetectionModel:()=>gQ,loadFaceExpressionModel:()=>mQ,loadFaceLandmarkModel:()=>uQ,loadFaceLandmarkTinyModel:()=>dQ,loadFaceRecognitionModel:()=>pQ,loadSsdMobilenetv1Model:()=>$D,loadTinyFaceDetectorModel:()=>lQ,loadTinyYolov2Model:()=>hQ,loadWeightMap:()=>rx,locateFaces:()=>yQ,matchDimensions:()=>DZ,minBbox:()=>GS,nets:()=>pt,nonMax
Suppression:()=>VS,normalize:()=>yi,padToSquare:()=>HS,predictAgeAndGender:()=>cQ,recognizeFaceExpressions:()=>aQ,resizeResults:()=>zD,resolveInput:()=>na,shuffleArray:()=>FX,sigmoid:()=>Lu,ssdMobilenetv1:()=>_D,tf:()=>xQ,tinyFaceDetector:()=>sQ,tinyYolov2:()=>iQ,toNetInput:()=>Rt,utils:()=>US,validateConfig:()=>gx,version:()=>vQ});const xQ=Ye(Je()),TQ=typeof process!="undefined",AQ=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",vQ={faceapi:GD,node:TQ,browser:AQ}; +`;return $[$.length-1]=" "+$[$.length-1]+"]"+(g?"":ne),$}function Ou(r){const l=[];for(let h=0;h`Length of values '${d}' does not match the size inferred by the shape '${this.size}'.`)}if(l==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=h||zR(l,this.size),this.strides=Nu(r)}set(r,...l){l.length===0&&(l=[0]),Z(l.length===this.rank,()=>`The number of provided coordinates (${l.length}) must match the rank (${this.rank})`);const h=this.locToIndex(l);this.values[h]=r}get(...r){r.length===0&&(r=[0]);let l=0;for(const d of r){if(d<0||d>=this.shape[l]){const f=`Requested out of range element at ${r}. Buffer shape=${this.shape}`;throw new Error(f)}l++}let h=r[r.length-1];for(let d=0;ddI(h))}catch(h){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return r}dataSync(){this.throwIfDisposed();const r=Yi().readSync(this.dataId);if(this.dtype==="string")try{return r.map(l=>dI(l))}catch(l){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return r}async bytes(){this.throwIfDisposed();const r=await Yi().read(this.dataId);return this.dtype==="string"?r:new Uint8Array(r.buffer)}dispose(){if(this.isDisposed)return;Yi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(r=!1){return Uc.print(this,r)}clone(){return this.throwIfDisposed(),Uc.clone(this)}toString(r=!1){const l=this.dataSync();return ZR(l,this.shape,this.dtype,r)}cast(r){return this.throwIfDisposed(),Uc.cast(this,r)}variable(r=!0,l,h){return this.throwIfDisposed(),Yi().makeVariable(this,r,l,h)}}Object.defineProperty(Tn,Symbol.hasInstance,{value:r=>!!r&&r.data!=null&&r.dataSync!=null&&r.throwIfDisposed!=null});class Pf extends Tn{constructor(r,l,h,d){super(r.shape,r.dtype,r.dataId,d);this.trainable=l,this.name=h}assign(r){if(r.dtype!==this.dtype)throw new Error(`dtype of the new value (${r.dtype}) and previous value (${this.dtype}) must match`);if(!oa(r.shape,this.shape))throw new Error(`shape of the new value (${r.shape}) and previous value (${this.shape}) must match`);Yi().disposeTensor(this),this.dataId=r.dataId,Yi().incRef(this,null)}dispose(){Yi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Pf,Symbol.hasInstance,{value:r=>r instanceof Tn&&r.assign!=null&&r.assign instanceof Function});var iO;(function(r){r.R0="R0",r.R1="R1",r.R2="R2",r.R3="R3",r.R4="R4",r.R5="R5",r.R6="R6"})(iO||(iO={}));var mI;(function(r){r.float32="float32",r.int32="int32",r.bool="int32",r.complex64="complex64"})(mI||(mI={}));var fI;(function(r){r.float32="float32",r.int32="int32",r.bool="bool",r.complex64="complex64"})(fI||(fI={}));var gI;(function(r){r.float32="float32",r.int32="float32",r.bool="float32",r.complex64="complex64"})(gI||(gI={}));var 
yI;(function(r){r.float32="complex64",r.int32="complex64",r.bool="complex64",r.complex64="complex64"})(yI||(yI={}));const rJ={float32:gI,int32:mI,bool:fI,complex64:yI};function rO(r,l){if(r==="string"||l==="string"){if(r==="string"&&l==="string")return"string";throw new Error(`Can not upcast ${r} with ${l}`)}return rJ[r][l]}function mt(r,l){if(r.dtype===l.dtype)return[r,l];const h=rO(r.dtype,l.dtype);return[r.cast(h),l.cast(h)]}function zf(r){const l=[],h=new Set;return oO(r,l,h),l}function oO(r,l,h){if(r==null)return;if(r instanceof Tn){l.push(r);return}if(!oJ(r))return;const d=r;for(const f in d){const g=d[f];h.has(g)||(h.add(g),oO(g,l,h))}}function oJ(r){return Array.isArray(r)||typeof r=="object"}class aO{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const r in this.registeredVariables)this.registeredVariables[r].dispose()}}class Eu{constructor(r){this.ENV=r,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new aO}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const r=this.getSortedBackends();for(let l=0;l{l.setupFunc!=null&&l.setupFunc(this.backendInstance)})}disposeRegisteredKernels(r){const l=cI(r);l.forEach(h=>{h.disposeFunc!=null&&h.disposeFunc(this.registry[r])})}initializeBackend(r){const l=this.registryFactory[r];if(l==null)throw new Error(`Cannot initialize backend ${r}, no registration found.`);try{const h=l.factory();if(h&&!(h instanceof h2)&&typeof h.then=="function"){const d=++this.pendingBackendInitId,f=h.then(g=>d(dthis.registryFactory[l].priority-this.registryFactory[r].priority)}initializeBackendsAndReturnBest(){const r=this.getSortedBackends();for(let l=0;lthis.startScope(h),()=>this.endScope(d),()=>(d=l(),d instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),d))}scopedRun(r,l,h){r();try{const d=h();return l(),d}catch(d){throw l(),d}}nextTensorId(){return Eu.nextTensorId++}nextVariableId(){return Eu.nextVariableId++}clone(r){const l=this.makeTensorFromDataId(r.dataId,r.shape,r.dtype),h={x:r},d=g=>({x:()=>{const S="float32",L={x:g},x={dtype:S};return H.runKernelFunc(A=>A.cast(g,S),L,null,Fc,x)}}),f=[];return this.addTapeNode(this.state.activeScope.name,h,[l],d,f,{}),l}runKernel(r,l,h,d,f){const g=null,S=null;return this.runKernelFunc(g,l,S,r,h,d,f)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(r,l,h){const d=this.backend.numDataIds();let f=0;h.forEach(L=>{f+=L.dtype==="complex64"?3:1});const g=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],S=d-l-f-g;if(S>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${S} data ids) after running '${r}'`)}runKernelFunc(r,l,h,d,f,g,S){let L,x=[];const A=this.isTapeOn();d==null&&(d=this.state.activeScope!=null?this.state.activeScope.name:"");const O=this.state.numBytes,C=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let $;const z=Wf(d,this.backendName);let ne;if(z!=null)$=()=>{const se=this.backend.numDataIds();ne=z.kernelFunc({inputs:l,attrs:f,backend:this.backend});const 
fe=Array.isArray(ne)?ne:[ne];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,se,fe);const de=fe.map(({dataId:Ae,shape:xe,dtype:Me})=>this.makeTensorFromDataId(Ae,xe,Me));if(A){let Ae=this.getTensorsForGradient(d,l,de);if(Ae==null){S==null&&(S=[]);const xe=de.filter((Me,Ke)=>S[Ke]);Ae=(g||[]).slice().concat(xe)}x=this.saveTensorsForBackwardMode(Ae)}return de};else{const se=fe=>{if(!A)return;x=fe.map(de=>this.keep(this.clone(de)))};$=()=>{const fe=this.backend.numDataIds();ne=this.tidy(()=>r(this.backend,se));const de=Array.isArray(ne)?ne:[ne];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,fe,de),de}}let te;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?L=$():(te=this.profiler.profileKernel(d,l,()=>$()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(te),L=te.outputs)}),A&&this.addTapeNode(d,l,L,h,x,f),this.state.profiling&&this.state.activeProfile.kernels.push({name:d,bytesAdded:this.state.numBytes-O,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-C,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(l).map(se=>l[se]!=null?l[se].shape:null),outputShapes:L.map(se=>se.shape),kernelTimeMs:te.timeMs,extraInfo:te.extraInfo}),Array.isArray(ne)?L:L[0]}saveTensorsForBackwardMode(r){const l=r.map(h=>this.keep(this.clone(h)));return l}getTensorsForGradient(r,l,h){const d=aI(r);if(d!=null){const f=d.inputsToSave||[],g=d.outputsToSave||[];let S;d.saveAllInputs?(Z(Array.isArray(l),()=>"saveAllInputs is true, expected inputs to be an array."),S=Object.keys(l).map(x=>l[x])):S=f.map(x=>l[x]);const L=h.filter((x,A)=>g[A]);return S.concat(L)}return null}makeTensor(r,l,h,d){if(r==null)throw new Error("Values passed to engine.makeTensor() are null");h=h||"float32",d=d||this.backend;let f=r;h==="string"&&Au(r[0])&&(f=r.map(L=>qR(L)));const g=d.write(f,l,h),S=new Tn(l,h,g,this.nextTensorId());if(this.incRef(S,d),h==="string"){const L=this.state.tensorInfo.get(g),x=HR(f);this.state.numBytes+=x-L.bytes,L.bytes=x}return S}makeTensorFromDataId(r,l,h,d){h=h||"float32";const f=new Tn(l,h,r,this.nextTensorId());return this.incRef(f,d),f}makeVariable(r,l=!0,h,d){h=h||this.nextVariableId().toString(),d!=null&&d!==r.dtype&&(r=r.cast(d));const f=new Pf(r,l,h,this.nextTensorId());if(this.state.registeredVariables[f.name]!=null)throw new Error(`Variable with name ${f.name} was already registered`);return this.state.registeredVariables[f.name]=f,this.incRef(f,this.backend),f}incRef(r,l){const h=this.state.tensorInfo.has(r.dataId)?this.state.tensorInfo.get(r.dataId).refCount:0;if(this.state.numTensors++,r.dtype==="string"&&this.state.numStringTensors++,h===0){this.state.numDataBuffers++;let d=0;r.dtype!=="complex64"&&r.dtype!=="string"&&(d=r.size*VR(r.dtype)),this.state.tensorInfo.set(r.dataId,{backend:l||this.backend,dtype:r.dtype,shape:r.shape,bytes:d,refCount:0}),this.state.numBytes+=d}this.state.tensorInfo.get(r.dataId).refCount++,r instanceof Pf||this.track(r)}disposeTensor(r){if(!this.state.tensorInfo.has(r.dataId))return;this.state.numTensors--,r.dtype==="string"&&this.state.numStringTensors--;const l=this.state.tensorInfo.get(r.dataId),h=l.refCount;h<=1?(r.dtype!=="complex64"&&(this.state.numBytes-=l.bytes),this.state.numDataBuffers--,l.backend.disposeData(r.dataId),this.state.tensorInfo.delete(r.dataId)):this.state.tensorInfo.get(r.dataId).refCount--}disposeVariables(){for(const r in this.state.registeredVariables){const 
l=this.state.registeredVariables[r];this.disposeVariable(l)}}disposeVariable(r){this.disposeTensor(r),this.state.registeredVariables[r.name]!=null&&delete this.state.registeredVariables[r.name]}memory(){const r=this.backend.memory();return r.numTensors=this.state.numTensors,r.numDataBuffers=this.state.numDataBuffers,r.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(r.unreliable=!0,r.reasons==null&&(r.reasons=[]),r.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),r}async profile(r){this.state.profiling=!0;const l=this.state.numBytes,h=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await r(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(d=>d.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-l,this.state.activeProfile.newTensors=this.state.numTensors-h;for(const d of this.state.activeProfile.kernels)d.kernelTimeMs=await d.kernelTimeMs,d.extraInfo=await d.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(r,l,h,d,f,g){const S={id:this.state.nextTapeNodeId++,kernelName:r,inputs:l,outputs:h,saved:f},L=aI(r);L!=null&&(d=L.gradFunc),d!=null&&(S.gradient=x=>(x=x.map((A,O)=>{if(A==null){const C=h[O],$=aa(C.size,C.dtype);return this.makeTensor($,C.shape,C.dtype)}return A}),d(x.length>1?x:x[0],f,g))),this.state.activeTape.push(S)}keep(r){return r.kept=!0,r}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(r){const l={track:[],name:"unnamed scope",id:this.state.nextScopeId++};r&&(l.name=r),this.state.scopeStack.push(l),this.state.activeScope=l}endScope(r){const l=zf(r),h=new Set(l.map(f=>f.id));for(let f=0;f{!f.kept&&f.scopeId===d.id&&this.track(f)})}gradients(r,l,h,d=!1){if(Z(l.length>0,()=>"gradients() received an empty list of xs."),h!=null&&h.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${h.dtype}'`);const f=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",r));Z(f instanceof Tn,()=>"The result y returned by f() must be a tensor.");const g=KR(this.state.activeTape,l,f);if(!d&&g.length===0&&l.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const S={};S[f.id]=h==null?aJ(f.shape):h,XR(S,g,x=>this.tidy(x),cJ);const L=l.map(x=>S[x.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(x=>{for(const A of x.saved)A.dispose()}),this.state.activeTape=null),{value:f,grads:L}})}customGrad(r){return Z(lI(r),()=>"The f passed in customGrad(f) must be a function."),(...l)=>{Z(l.every(f=>f instanceof Tn),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let h;const d={};return l.forEach((f,g)=>{d[g]=f}),this.runKernelFunc((f,g)=>(h=r(...l,g),Z(h.value instanceof Tn,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),Z(lI(h.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),h.value),d,(f,g)=>{const S=h.gradFunc(f,g),L=Array.isArray(S)?S:[S];Z(L.length===l.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),Z(L.every(A=>A instanceof Tn),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const x={};return L.forEach((A,O)=>{x[O]=()=>A}),x})}}readSync(r){const l=this.state.tensorInfo.get(r);return l.backend.readSync(r)}read(r){const l=this.state.tensorInfo.get(r);return l.backend.read(r)}async time(r){const l=uI(),h=await this.backend.time(r);return h.wallMs=uI()-l,h}track(r){return this.state.activeScope!=null&&(r.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(r)),r}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new aO;for(const r in this.registry)this.disposeRegisteredKernels(r),this.registry[r].dispose(),delete this.registry[r];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Eu.nextTensorId=0;Eu.nextVariableId=0;function aJ(r){const l=Uf(qt(r),"float32");return H.makeTensor(l,r,"float32")}function bI(){const r=sI();if(r._tfengine==null){const l=new d2(r);r._tfengine=new Eu(l)}return m2(r._tfengine.ENV),tO(()=>r._tfengine),r._tfengine}const H=bI();function cJ(r,l){const h={a:r,b:l};return H.runKernelFunc((d,f)=>{const g=d.add(r,l);return f([r,l]),g},h,null,kc)}function cO(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}const br=Es();br.registerFlag("DEBUG",()=>!1,r=>{r&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});br.registerFlag("IS_BROWSER",()=>cO());br.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");br.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));br.registerFlag("PROD",()=>!1);br.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>br.getBool("DEBUG"));br.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);br.registerFlag("IS_TEST",()=>!1);function wr(r,l){let h=r;if(Ds(r))return l==="string"?[]:[r.length];if(!Array.isArray(r))return[];const d=[];for(;Array.isArray(h)||Ds(h)&&l!=="string";)d.push(h.length),h=h[0];return Array.isArray(r)&&Es().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&lO(r,d,[]),d}function lO(r,l,h){if(h=h||[],!Array.isArray(r)&&!Ds(r)){Z(l.length===0,()=>`Element arr[${h.join("][")}] is a primitive, but should be an array/TypedArray of ${l[0]} elements`);return}Z(l.length>0,()=>`Element arr[${h.join("][")}] should be a primitive, but is an array of ${r.length} elements`),Z(r.length===l[0],()=>`Element arr[${h.join("][")}] should have ${l[0]} elements, but has ${r.length} elements`);const d=l.slice(1);for(let f=0;f=0&&(f=d),hO(d,f,l,h),r==null||!Ds(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string"){const x=r==null?"null":r.constructor.name;throw new Error(`Argument '${l}' passed to '${h}' must be a Tensor or TensorLike, but got '${x}'`)}const g=wr(r,f);!Ds(r)&&!Array.isArray(r)&&(r=[r]);const S=!0,L=f!=="string"?$f(r,f):Wc(r,[],S);return H.makeTensor(L,g,f)}function Gf(r,l,h,d="numeric"){if(!Array.isArray(r))throw new Error(`Argument ${l} passed to ${h} must be a \`Tensor[]\` or \`TensorLike[]\``);const f=r;return f.map((g,S)=>M(g,`${l}[${S}]`,h),d)}const uO="__op";function G(r){const l=Object.keys(r);if(l.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${l.length} keys.`);let h=l[0];const d=r[h];h.endsWith("_")&&(h=h.substring(0,h.length-1)),h=h+uO;const f=(...g)=>{H.startScope(h);try{const S=d(...g);return S instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),H.endScope(S),S}catch(S){throw H.endScope(null),S}};return Object.defineProperty(f,"name",{value:h,configurable:!0}),f}function lJ(r,l){const h=M(r,"real","complex"),d=M(l,"imag","complex");Zt(h.shape,d.shape,`real and imag shapes, ${h.shape} and ${d.shape}, must match in call to tf.complex().`);const f=S=>S.complex(h,d),g={real:h,imag:d};return H.runKernelFunc(f,g,null,E2)}const qi=G({complex_:lJ});function ji(r,l,h,d){if(d==null&&(d=vu(r)),d==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Ds(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(l!=null){Bf(l);const f=qt(l),g=qt(h);Z(f===g,()=>`Based on the provided shape, [${l}], the tensor should have ${f} values but has ${g}`);for(let S=0;S`Error creating a new Tensor. Inferred shape (${h}) does not match the provided shape (${l}). 
`)}}return!Ds(r)&&!Array.isArray(r)&&(r=[r]),l=l||h,r=d!=="string"?$f(r,d):Wc(r,[],!0),H.makeTensor(r,l,d)}function wI(r,l,h){const d=wr(r,h);return ji(r,l,d,h)}function Du(r,l="float32",h){return l=l||"float32",Bf(r),new eO(r,l,h)}function hJ(r,l){const h=M(r,"x","cast");if(!GR(l))throw new Error(`Failed to cast to unknown dtype ${l}`);if(l==="string"&&h.dtype!=="string"||l!=="string"&&h.dtype==="string")throw new Error("Only strings can be casted to strings");const d={x:h},f={dtype:l};return H.runKernelFunc(g=>g.cast(h,l),d,null,Fc,f)}const Le=G({cast_:hJ});function uJ(r){const l=M(r,"x","clone",null),h=()=>H.makeTensorFromDataId(l.dataId,l.shape,l.dtype),d={x:l};return H.runKernelFunc(h,d,null,tf)}const bi=G({clone_:uJ});function LI(r,l=!1){console.log(r.toString(l))}bI();const dJ={buffer:Du,cast:Le,clone:bi,print:LI};nO(dJ);function pJ(r,l){const h=M(r,"x","reshape",null),d={x:h},f={shape:l},g=(S,L)=>(l=MR(l,h.size),Z(h.size===qt(l),()=>"new shape and old shape must have the same number of elements."),L([h]),S.reshape(h,l));return H.runKernelFunc(g,d,null,mf,f)}const re=G({reshape_:pJ});function mJ(r,l,h=!1,d=!1){let f=M(r,"a","matMul"),g=M(l,"b","matMul");[f,g]=mt(f,g),Z(f.rank>=2&&g.rank>=2&&f.rank===g.rank,()=>`Error in matMul: inputs must have the same rank of at least 2, got ranks ${f.rank} and ${g.rank}.`);const S=h?f.shape[f.rank-2]:f.shape[f.rank-1],L=d?g.shape[g.rank-1]:g.shape[g.rank-2],x=h?f.shape[f.rank-1]:f.shape[f.rank-2],A=d?g.shape[g.rank-2]:g.shape[g.rank-1],O=f.shape.slice(0,-2),C=g.shape.slice(0,-2),$=qt(O),z=qt(C);Z(oa(O,C),()=>`Error in matMul: outer dimensions (${O}) and (${C}) of Tensors with shapes ${f.shape} and ${g.shape} must match.`),Z(S===L,()=>`Error in matMul: inner shapes (${S}) and (${L}) of Tensors with shapes ${f.shape} and ${g.shape} and transposeA=${h} and transposeB=${d} must match.`);const ne=f.shape.slice(0,-2).concat([x,A]),te=h?re(f,[$,S,x]):re(f,[$,x,S]),se=d?re(g,[z,A,L]):re(g,[z,L,A]),fe=(Me,Ke)=>(Ke([te,se]),Me.batchMatMul(te,se,h,d)),de={a:te,b:se},Ae={transposeA:h,transposeB:d},xe=H.runKernelFunc(fe,de,null,Mm,Ae);return re(xe,ne)}const pn=G({matMul_:mJ});function fJ(r,l){const h=M(r,"x","transpose");if(l==null&&(l=h.shape.map((g,S)=>S).reverse()),Z(h.rank===l.length,()=>`Error in transpose: rank of input ${h.rank} must match length of perm ${l}.`),l.forEach(g=>{Z(g>=0&&g`All entries in 'perm' must be between 0 and ${h.rank-1} but got ${l}`)}),h.rank<=1)return h.clone();const d={x:h},f={perm:l};return H.runKernelFunc(g=>g.transpose(h,l),d,null,Ef,f)}const Wt=G({transpose_:fJ});function SI(r,l,h){if(_c(r),l!=null&&l.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const d=wr(r,h);if(d.length!==3&&d.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(d.length===1&&l==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return ji(r,l,d,h)}const II={};vc(II,{fromPixels:()=>bJ,toPixels:()=>yJ});let Bc;function gJ(r,l=3){if(l>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(r==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let h=!1,d=!1,f=!1,g=!1,S=!1;if(r.data instanceof Uint8Array)h=!0;else if(typeof ImageData!="undefined"&&r instanceof ImageData)d=!0;else if(typeof HTMLVideoElement!="undefined"&&r instanceof HTMLVideoElement)f=!0;else if(typeof HTMLImageElement!="undefined"&&r instanceof HTMLImageElement)g=!0;else if(r.getContext!=null)S=!0;else throw new 
Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`);if(f){const z=2;if(f&&r.readyState element.")}const L=Wf(rI,H.backendName);if(L!=null){const z={pixels:r},ne={numChannels:l};return H.runKernel(rI,z,ne)}const[x,A]=f?[r.videoWidth,r.videoHeight]:[r.width,r.height];let O;S?O=r.getContext("2d").getImageData(0,0,x,A).data:d||h?O=r.data:(g||f)&&(Bc==null&&(Bc=document.createElement("canvas").getContext("2d")),Bc.canvas.width=x,Bc.canvas.height=A,Bc.drawImage(r,0,0,x,A),O=Bc.getImageData(0,0,x,A).data);let C;if(l===4)C=new Int32Array(O);else{const z=x*A;C=new Int32Array(z*l);for(let ne=0;ne4||g===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${g}`);if(h.dtype!=="float32"&&h.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${h.dtype}. Please use float32 or int32 tensors.`);const S=await h.data(),L=h.dtype==="float32"?255:1,x=new Uint8ClampedArray(f*d*4);for(let A=0;A1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${z}.`)}else if(h.dtype==="int32"&&(z<0||z>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${z}.`);g===1?(O[0]=z*L,O[1]=z*L,O[2]=z*L):O[$]=z*L}const C=A*4;x[C+0]=Math.round(O[0]),x[C+1]=Math.round(O[1]),x[C+2]=Math.round(O[2]),x[C+3]=Math.round(O[3])}if(l!=null){l.width=f,l.height=d;const A=l.getContext("2d"),O=new ImageData(x,f,d);A.putImageData(O,0,0)}return h!==r&&h.dispose(),x}const bJ=G({fromPixels_:gJ});function dO(r,l,h){const d=r.shape.length;Z(d===l.length,()=>`Error in slice${d}D: Length of begin ${l} must match the rank of the array (${d}).`),Z(d===h.length,()=>`Error in slice${d}D: Length of size ${h} must match the rank of the array (${d}).`);for(let f=0;f`Error in slice${d}D: begin[${f}] + size[${f}] (${l[f]+h[f]}) would overflow input.shape[${f}] (${r.shape[f]})`)}function Vf(r,l,h){let d;const f=r.shape.length;typeof l=="number"?d=[l,...new Array(f-1).fill(0)]:l.length{Z(S!==-1,()=>"slice() does not support negative begin indexing.")});let g;return h==null?g=new Array(f).fill(-1):typeof h=="number"?g=[h,...new Array(f-1).fill(-1)]:h.lengthS>=0?S:(Z(S===-1,()=>`Negative size values should be exactly -1 but got ${S} for the slice() size at index ${L}.`),r.shape[L]-d[L])),[d,g]}function wJ(r){Es().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(r+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}sO(wJ);function pO(r,l){return H.tidy(r,l)}function mO(r){const l=zf(r);l.forEach(h=>h.dispose())}function LJ(r,l){let h=M(r,"a","add"),d=M(l,"b","add");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.add(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,kc)}const St=G({add_:LJ});function SJ(r,l){let h=M(r,"a","floorDiv"),d=M(l,"b","floorDiv");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.floorDiv(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,Zm)}const xI=G({floorDiv_:SJ});function IJ(r,l){let h=M(r,"a","div"),d=M(l,"b","div");if([h,d]=mt(h,d),h.dtype==="int32"&&d.dtype==="int32")return xI(h,d);const f=(L,x)=>{const A=L.realDivide(h,d);return x([h,d]),A},g={a:h,b:d},S={};return H.runKernelFunc(f,g,null,Km,S)}const ze=G({div_:IJ});function xJ(r,l){let h=M(r,"a","mul"),d=M(l,"b","mul");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.multiply(h,d);return 
L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,lf)}const ae=G({mul_:xJ});function TJ(r){const l=M(r,"x","abs"),h={x:l};return H.runKernelFunc((d,f)=>(f([l]),l.dtype==="complex64"?d.complexAbs(l):d.abs(l)),h,null,Bm)}const Pn=G({abs_:TJ});function AJ(r,l){for(let h=0;hr[g]);return[h,f]}function ts(r,l){const h=l.map(d=>1);return vJ(r,h,l)}function fs(r,l){if(AJ(r,l))return null;const h=[];for(let d=0;dh.push(d)),h}function Mc(r){return r.map((l,h)=>[h,l]).sort((l,h)=>l[1]-h[1]).map(l=>l[0])}function oo(r,l){const h=[];for(let d=l-r;d`The output # of rows (${L}) must be an integer. Change the stride and/or zero pad parameters`);const x=ca((S-l+2*d)/h+1,f);return Z(Qt(x),()=>`The output # of columns (${x}) must be an integer. Change the stride and/or zero pad parameters`),[L,x]}function OJ(r,l,h,d,f,g){f==null&&(f=gO(r,l,d));const S=r[0],L=r[1],x=r[2],A=ca((S-l+2*f)/d+1,g);Z(Qt(A),()=>`The output # of depths (${A}) must be an integer. Change the stride and/or zero pad parameters`);const O=ca((L-l+2*f)/d+1,g);Z(Qt(O),()=>`The output # of rows (${O}) must be an integer. Change the stride and/or zero pad parameters`);const C=ca((x-l+2*f)/d+1,g);return Z(Qt(C),()=>`The output # of columns (${C}) must be an integer. Change the stride and/or zero pad parameters`),[A,O,C,h]}function gO(r,l,h,d=1){const f=Pc(l,d);return Math.floor((r[0]*(h-1)-h+f)/2)}function Hf(r){return typeof r=="number"?[r,r,r]:r.length===2?[r[0],r[1],1]:r}function TI(r){return typeof r=="number"?[r,r,r]:r}function Pc(r,l){return l<=1?r:r+(r-1)*(l-1)}function NJ(r,l,h,d,f,g,S,L,x){let A,O,C;if(typeof r=="number"){const $=r===0?"VALID":"NUMBER";A={top:r,bottom:r,left:r,right:r,type:$};const z=RJ([l,h],g,d,r,L);O=z[0],C=z[1]}else if(r==="same"){O=Math.ceil(l/d),C=Math.ceil(h/f);const $=Math.max(0,(O-1)*d+g-l),z=Math.max(0,(C-1)*f+S-h),ne=Math.floor($/2),te=$-ne,se=Math.floor(z/2),fe=z-se;A={top:ne,bottom:te,left:se,right:fe,type:"SAME"}}else if(r==="valid")A={top:0,bottom:0,left:0,right:0,type:"VALID"},O=Math.ceil((l-g+1)/d),C=Math.ceil((h-S+1)/f);else if(typeof r=="object"){const $=x==="channelsLast"?r[1][0]:r[2][0],z=x==="channelsLast"?r[1][1]:r[2][1],ne=x==="channelsLast"?r[2][0]:r[3][0],te=x==="channelsLast"?r[2][1]:r[3][1],se=$===0&&z===0&&ne===0&&te===0?"VALID":"EXPLICIT";A={top:$,bottom:z,left:ne,right:te,type:se},O=ca((l-g+$+z)/d+1,L),C=ca((h-S+ne+te)/f+1,L)}else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:A,outHeight:O,outWidth:C}}function CJ(r,l,h,d,f,g,S,L,x,A,O){let C,$,z,ne;if(typeof r=="number"){const te=r===0?"VALID":"NUMBER";C={top:r,bottom:r,left:r,right:r,front:r,back:r,type:te};const se=OJ([l,h,d,1],L,1,f,r,O);$=se[0],z=se[1],ne=se[2]}else if(r==="same"){$=Math.ceil(l/f),z=Math.ceil(h/g),ne=Math.ceil(d/S);const te=($-1)*f+L-l,se=(z-1)*g+x-h,fe=(ne-1)*S+A-d,de=Math.floor(te/2),Ae=te-de,xe=Math.floor(se/2),Me=se-xe,Ke=Math.floor(fe/2),wt=fe-Ke;C={top:xe,bottom:Me,left:Ke,right:wt,front:de,back:Ae,type:"SAME"}}else if(r==="valid")C={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},$=Math.ceil((l-L+1)/f),z=Math.ceil((h-x+1)/g),ne=Math.ceil((d-A+1)/S);else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:C,outDepth:$,outHeight:z,outWidth:ne}}function ca(r,l){if(!l)return r;switch(l){case"round":return Math.round(r);case"ceil":return Math.ceil(r);case"floor":return Math.floor(r);default:throw new Error(`Unknown roundingMode ${l}`)}}function ao(r){const[l,h,d]=Hf(r);return l===1&&h===1&&d===1}function co(r,l){return ao(r)||ao(l)}function 
zc(r){if(r==="NHWC")return"channelsLast";if(r==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${r}`)}function yO(r,l){const h=r[0].length;r.forEach((f,g)=>{Z(f.length===h,()=>`Error in concat${h}D: rank of tensors[${g}] must be the same as the rank of the rest (${h})`)}),Z(l>=0&&l`Error in concat${h}D: axis must be between 0 and ${h-1}.`);const d=r[0];r.forEach((f,g)=>{for(let S=0;S`Error in concat${h}D: Shape of tensors[${g}] (${f}) does not match the shape of the rest (${d}) along the non-concatenated axis ${g}.`)})}function bO(r,l){const h=r[0].slice();for(let d=1;d=1,()=>"Pass at least one tensor to concat");let h=Gf(r,"tensors","concat");h[0].dtype==="complex64"&&h.forEach(S=>{if(S.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${S.dtype}. `)});const d=(S,L)=>{const x=ht(l,h[0].shape)[0],A=bO(h.map($=>$.shape),x);if(qt(A)===0)return wI([],A);if(h=h.filter($=>$.size>0),h.length===1)return h[0];const O=h.map($=>$.shape);yO(O,x);const C=S.concat(h,x);return L(h),C},f=h,g={axis:l};return H.runKernelFunc(d,f,null,Gm,g)}const bn=G({concat_:EJ});function DJ(r){const l=M(r,"x","sigmoid"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sigmoid(l);return f([g]),g},h,null,xf)}const AI=G({sigmoid_:DJ});function kJ(r,l,h){const d=M(r,"x","slice");if(d.rank===0)throw new Error("Slicing scalar is not possible");const f=(L,x)=>{const[A,O]=Vf(d,l,h);return dO(d,A,O),x([d]),L.slice(d,A,O)},g={x:d},S={begin:l,size:h};return H.runKernelFunc(f,g,null,Lf,S)}const At=G({slice_:kJ});function FJ(r,l,h){const d=M(r,"x","batchToSpaceND"),f=l.reduce((x,A)=>x*A);Z(d.rank>=1+l.length,()=>`input rank is ${d.rank} but should be > than blockShape.length ${l.length}`),Z(h.length===l.length,()=>`crops.length is ${h.length} but should be equal to blockShape.length ${l.length}`),Z(d.shape[0]%f===0,()=>`input tensor batch is ${d.shape[0]} but is not divisible by the product of the elements of blockShape ${l.join(" * ")} === ${f}`);const g=x=>x.batchToSpaceND(d,l,h),S={x:d},L={blockShape:l,crops:h};return H.runKernelFunc(g,S,null,Pm,L)}const vI=G({batchToSpaceND_:FJ});function _J(r,l){let h=M(r,"broadcastTo","x");const d=h.shape;if(l.some(O=>!(O>0)||O%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${l}].`);if(l.lengthh.rank){const O=h.shape.slice();for(;O.length=0;O--)if(f[O]===l[O])g[O]=1;else if(h.shape[O]!==1)throw new Error(`broadcastTo(): [${d}] cannot be broadcast to [${l}].`);const S=g.map((O,C)=>O>1?C:-1).filter(O=>O>=0);if(S.length===0)return bi(h);const L=O=>O.tile(h,g),x={x:h},A={shape:l,inputShape:f};return H.runKernelFunc(L,x,null,zm,A)}const jf=G({broadcastTo_:_J});function WJ(r,l,h,d,f="NHWC",g=[1,1],S){const L=M(r,"x","conv2d"),x=M(l,"filter","conv2d");let A=L,O=!1;L.rank===3&&(O=!0,A=re(L,[1,L.shape[0],L.shape[1],L.shape[2]])),Z(A.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${A.rank}.`),Z(x.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${x.rank}.`),S!=null&&Z(Qt(d),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${S} but got pad ${d}.`);const C=f==="NHWC"?A.shape[3]:A.shape[1];Z(C===x.shape[2],()=>`Error in conv2d: depth of input (${C}) must match input depth for filter ${x.shape[2]}.`),Z(co(h,g),()=>`Error in conv2D: Either strides or dilations must be 1. 
Got strides ${h} and dilations '${g}'`);const $=(se,fe)=>{const de=zc(f),Ae=Lr(A.shape,x.shape,h,g,d,S,!1,de),xe=se.conv2d(A,x,Ae);return fe([A,x]),xe},z={x:A,filter:x},ne={strides:h,pad:d,dataFormat:f,dilations:g,dimRoundingMode:S},te=H.runKernelFunc($,z,null,Vm,ne);return O?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const NI=G({conv2d_:WJ});function $J(r,l,h,d,f,g="NHWC",S){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let L=r,x=l,A=!1;l.rank===3&&(A=!0,x=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]),L=[1,r[0],r[1],r[2]]),Z(L.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${L.length}.`),Z(x.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${x.rank}`),Z(h.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${h.rank}`);const O=g==="NHWC"?L[3]:L[1],C=g==="NHWC"?x.shape[3]:x.shape[1];Z(O===h.shape[2],()=>`Error in conv2dDerInput: depth of input (${O}) must match input depth for filter ${h.shape[2]}.`),Z(C===h.shape[3],()=>`Error in conv2dDerInput: depth of output (${C}) must match output depth for filter ${h.shape[3]}.`),S!=null&&Z(Qt(f),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${S} but got pad ${f}.`);const $=(se,fe)=>{const de=1,Ae=zc(g),xe=Lr(L,h.shape,d,de,f,S,!1,Ae),Me=se.conv2dDerInput(x,h,xe);return fe([x,h]),Me},z={dy:x,filter:h},ne={strides:d,pad:f,dataFormat:g,dimRoundingMode:S,inputShape:L},te=H.runKernelFunc($,z,null,Hm,ne);return A?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const wO=G({conv2DBackpropInput_:$J});function UJ(r,l,h,d,f){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let g=r,S=l,L=!1;l.rank===4&&(L=!0,S=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),g=[1,r[0],r[1],r[2],r[3]]);const x=g[4],A=S.shape[4];Z(g.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${g.length}.`),Z(S.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${S.rank}`),Z(h.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${h.rank}`),Z(x===h.shape[3],()=>`Error in conv3dDerInput: depth of input (${x}) must match input depth for filter ${h.shape[3]}.`),Z(A===h.shape[4],()=>`Error in conv3dDerInput: depth of output (${A}) must match output depth for filter ${h.shape[4]}.`);const O=ne=>{const te=1,se=ku(g,h.shape,d,te,f);return ne.conv3dDerInput(S,h,se)},C={dy:S},$={pad:f},z=H.runKernelFunc(O,C,null,_2,$);return L?re(z,[z.shape[1],z.shape[2],z.shape[3],z.shape[4]]):z}const LO=G({conv3DBackpropInput_:UJ});function BJ(r){const l=M(r,"x","cos"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.cos(l);return f([l]),g},h,null,Ym)}const Fu=G({cos_:BJ});function MJ(r){const l=M(r,"x","cosh"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.cosh(l);return f([l]),g},h,null,qm)}const CI=G({cosh_:MJ});function PJ(r,l=0,h=!1,d=!1){const f=M(r,"x","cumsum"),g=(x,A)=>{const O=fs([l],f.rank);let C=f;O!=null&&(C=Wt(f,O));const $=oo(1,f.rank)[0];let z=x.cumsum(C,$,h,d);if(A([f]),O!=null){const ne=Mc(O);z=Wt(z,ne)}return z},S={x:f},L={axis:l,exclusive:h,reverse:d};return H.runKernelFunc(g,S,null,jm,L)}const RI=G({cumsum_:PJ});function vt(r,l){const h=[];for(let d=0;d1)&&h.unshift(g)}return h}function rt(r,l){const h=[],d=Math.max(r.length,l.length);for(let f=0;fS.equal(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,Y2)}const OI=G({equal_:zJ});function GJ(r,l,h){const 
d=M(l,"a","where"),f=M(h,"b","where"),g=M(r,"condition","where","bool"),S=rt(d.shape,f.shape),L=jf(d,S),x=jf(f,S);g.rank===1&&Z(g.shape[0]===d.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),g.rank!==1&&Zt(g.shape,x.shape,"Error in where: ");const A=(C,$)=>{const z=C.select(g,L,x);return $([g]),z},O={condition:g,t:L,e:x};return H.runKernelFunc(A,O,null,wf)}const zn=G({where_:GJ});function VJ(r){const l=M(r,"x","zerosLike"),h={x:l};return H.runKernelFunc(d=>d.zerosLike(l),h,null,Ff)}const je=G({zerosLike_:VJ});function HJ(r){const l=M(r,"x","exp"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.exp(l);return f([g]),g},h,null,Xm)}const Gn=G({exp_:HJ});function YJ(r,l=0){const h=null,d=M(r,"x","expandDims",h);Z(l<=d.rank,()=>"Axis must be <= rank of the tensor");const f=d.shape.slice();return l<0&&(Z(-(d.rank+1)<=l,()=>`Axis must be in the interval [${-(d.rank+1)}, ${d.rank}]`),l=d.rank+l+1),f.splice(l,0,1),re(d,f)}const ks=G({expandDims_:YJ});function qJ(r,l){const h=null,d=M(r,"x","tile",h);Z(d.rank===l.length,()=>`Error in transpose: rank of input ${d.rank} must match length of reps ${l}.`);const f=(x,A)=>{const O=x.tile(d,l);return A([d]),O},g=[d],S={x:d},L={reps:l};return H.runKernelFunc(f,S,null,Of,L,g)}const la=G({tile_:qJ});function jJ(r,l,h,d="float32"){l==null&&(l=r);const f=Du([r,l],d),g=r<=l?r:l;for(let L=0;Lf.fill(r,l,h),{},null,K2,d)}function KJ(r){const l=M(r,"x","floor"),h={x:l};return H.runKernelFunc(d=>d.floor(l),h,null,Jm)}const kI=G({floor_:KJ});function SO(r,l,h){const d=r.shape[h],f=[];let g=1,S=1;for(let L=0;L{const O=ht(h,d.shape)[0],C=SO(d,f,O),$=x.gather(d,re(f,[f.size]),O);return A([d,f]),re($,C.outputShape)};return H.runKernelFunc(L,g,null,Qm,S)}const FI=G({gather_:XJ});function JJ(r,l){let h=M(r,"a","greater"),d=M(l,"b","greater");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.greater(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,Z2)}const wi=G({greater_:JJ});function ZJ(r,l){let h=M(r,"a","greaterEqual"),d=M(l,"b","greaterEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.greaterEqual(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,ef)}const Sr=G({greaterEqual_:ZJ});function QJ(r){const l=M(r,"input","imag"),h=f=>f.imag(l),d={input:l};return H.runKernelFunc(h,d,null,eR)}const _u=G({imag_:QJ});function e9(r,l){let h=M(r,"a","maximum"),d=M(l,"b","maximum");[h,d]=mt(h,d),h.dtype==="bool"&&(h=Le(h,"int32"),d=Le(d,"int32")),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.maximum(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,of)}const _I=G({maximum_:e9});function ke(r,l){if((Ds(r)&&l!=="string"||Array.isArray(r))&&l!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(l==="string"&&Ds(r)&&!(r instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const h=[],d=[];return ji(r,h,d,l)}function t9(r,l){let h=M(r,"a","less"),d=M(l,"b","less");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.less(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,iR)}const WI=G({less_:t9});function n9(r,l){let h=M(r,"a","lessEqual"),d=M(l,"b","lessEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.lessEqual(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,rR)}const Ir=G({lessEqual_:n9});function s9(r){const l=M(r,"x","log"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.log(l);return f([l]),g},h,null,nf)}const lo=G({log_:s9});function i9(r){const 
l=M(r,"x","log1p"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.log1p(l);return f([l]),g},h,null,sf)}const $I=G({log1p_:i9});function Kf(r){return H.customGrad(r)}function r9(r){const l=M(r,"x","neg"),h={x:l};return H.runKernelFunc(d=>d.neg(l),h,null,hf)}const yt=G({neg_:r9});function o9(r,l=null,h=!1){const d=M(r,"x","max"),f=(L,x)=>{const A=ht(l,d.shape);let O=A;const C=fs(O,d.rank);let $=d;C!=null&&($=Wt(d,C),O=oo(O.length,$.rank));const z=L.max($,O);C!=null&&$.dispose();let ne=z;if(h){const te=ts(ne.shape,ht(l,d.shape));ne=re(ne,te),z.dispose()}return x([d,ne]),ne},g={x:d},S={reductionIndices:l,keepDims:h};return H.runKernelFunc(f,g,null,rf,S)}const ha=G({max_:o9});function a9(r,l){let h=M(r,"a","sub"),d=M(l,"b","sub");[h,d]=mt(h,d);const f=(S,L)=>{const x=S.subtract(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,Rf)}const Be=G({sub_:a9});function c9(r,l=null,h=!1){let d=M(r,"x","sum");d.dtype==="bool"&&(d=Le(d,"int32"));const f=(L,x)=>{x([d]);const A=ht(l,d.shape),O=fs(A,d.rank);let C=A,$=d;O!=null&&($=Wt(d,O),C=oo(C.length,d.rank));let z=L.sum($,C);if(h){const ne=ts(z.shape,A);z=re(z,ne)}return z},g={x:d},S={axis:l,keepDims:h};return H.runKernelFunc(f,g,null,Af,S)}const Fe=G({sum_:c9});function l9(r,l=null,h=!1){const d=M(r,"x","logSumExp"),f=ht(l,d.shape),g=ha(d,f,!0),S=Be(d,g),L=Gn(S),x=Fe(L,f),A=lo(x),O=St(re(g,A.shape),A);if(h){const C=ts(O.shape,f);return re(O,C)}return O}const UI=G({logSumExp_:l9});function h9(r,l){const h=M(r,"a","logicalAnd","bool"),d=M(l,"b","logicalAnd","bool");rt(h.shape,d.shape);const f={a:h,b:d};return H.runKernelFunc(g=>g.logicalAnd(h,d),f,null,oR)}const ua=G({logicalAnd_:h9});function u9(r){const l=M(r,"x","logicalNot","bool"),h={x:l};return H.runKernelFunc(d=>d.logicalNot(l),h,null,aR)}const BI=G({logicalNot_:u9});function Fs(r,l="float32"){if(l==="complex64"){const d=Fs(r,"float32"),f=Fs(r,"float32");return qi(d,f)}const h=aa(qt(r),l);return H.makeTensor(h,r,l)}function Ki(r,l="float32"){if(l==="complex64"){const d=Ki(r,"float32"),f=Fs(r,"float32");return qi(d,f)}const h=Uf(qt(r),l);return H.makeTensor(h,r,l)}function d9(r,l=null,h=!1){const d=M(r,"x","mean"),f=ht(l,d.shape),g=fO(d.shape,f),S=g[1],L=qt(S),x=Kf(A=>{const O=ke(L),C=O.dtype===A.dtype?A:Le(A,O.dtype),$=ze(C,O),z=Fe($,l,h),ne=te=>{const se=A.shape.slice();f.forEach(Ae=>{se[Ae]=1});const fe=re(te,se),de=ze(ae(fe,Ki(A.shape,"float32")),L);return de};return{value:z,gradFunc:ne}});return x(d)}const MI=G({mean_:d9});function p9(r,l=null,h=!1){const d=M(r,"x","min"),f=(L,x)=>{const A=ht(l,d.shape);let O=A;const C=fs(O,d.rank);let $=d;C!=null&&($=Wt(d,C),O=oo(O.length,d.rank));const z=L.min($,O);C!=null&&$.dispose();let ne=z;if(h){const te=ts(ne.shape,A);ne=re(z,te),z.dispose()}return x([d,ne]),ne},g={x:d},S={axis:l,keepDims:h};return H.runKernelFunc(f,g,null,af,S)}const Xf=G({min_:p9});function m9(r,l){let h=M(r,"a","minimum"),d=M(l,"b","minimum");[h,d]=mt(h,d),h.dtype==="bool"&&(h=Le(h,"int32"),d=Le(d,"int32")),rt(h.shape,d.shape);const f=(S,L)=>{const x=S.minimum(h,d);return L([h,d]),x},g={a:h,b:d};return H.runKernelFunc(f,g,null,cf)}const PI=G({minimum_:m9});function f9(r){const l=M(r,"x","square"),h={},d=[l],f=[];return H.runKernelFunc((g,S)=>(S([l]),g.square(l)),{x:l},null,"Square",h,d,f)}const ut=G({square_:f9});function g9(r,l){let h=M(r,"a","notEqual"),d=M(l,"b","notEqual");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=S=>S.notEqual(h,d),g={a:h,b:d};return H.runKernelFunc(f,g,null,gR)}const zI=G({notEqual_:g9});function y9(r){const 
l=M(r,"input","real"),h=f=>f.real(l),d={input:l};return H.runKernelFunc(h,d,null,TR)}const Gc=G({real_:y9});function b9(r,l,h=0){const d=M(r,"x","pad");if(d.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const f=(L,x)=>(x([d]),L.pad(d,l,h)),g={paddings:l,constantValue:h},S={x:d};return H.runKernelFunc(f,S,null,uf,g)}const GI=G({pad_:b9});function w9(r,l,h){const d=M(r,"x","spaceToBatchND");Z(d.rank>=1+l.length,()=>`input rank ${d.rank} should be > than [blockShape] ${l.length}`),Z(h.length===l.length,()=>`paddings.shape[0] ${h.length} must be equal to [blockShape] ${l.length}`),Z(d.shape.reduce((L,x,A)=>A>0&&A<=l.length?L&&(x+h[A-1][0]+h[A-1][1])%l[A-1]===0:L,!0),()=>`input spatial dimensions ${d.shape.slice(1)} with paddings ${h.toString()} must be divisible by blockShapes ${l.toString()}`);const f=L=>L.spaceToBatchND(d,l,h),g={x:d},S={blockShape:l,paddings:h};return H.runKernelFunc(f,g,null,vf,S)}const VI=G({spaceToBatchND_:w9});function L9(r,l){let h=M(r,"base","pow"),d=M(l,"exp","pow");[h,d]=mt(h,d);const f={a:h,b:d},g=(S,L)=>{const x=S.pow(h,d);return L([h,d,x]),x};return H.runKernelFunc(g,f,null,df)}const da=G({pow_:L9});function ho(r,l){_c(r);const h=wr(r,l);if(h.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");const d=null;return ji(r,d,h,l)}function Jf(r,l,h=1,d="float32"){if(h===0)throw new Error("Cannot have a step of zero");const f=()=>{const S=r===l,L=r1;if(S||L||x)return Fs([0],d);const A=Math.abs(Math.ceil((l-r)/h)),O=aa(A,d);l(g([l]),l.dtype==="bool"?Le(l,"int32"):f.relu(l)),d={x:l};return H.runKernelFunc(h,d,null,pf)}const Wu=G({relu_:S9});function I9(r,l){const h=M(r,"x","reverse"),d=S=>{const L=ht(l,h.shape);if(h.rank===0)return bi(h);const x=S.reverse(h,L);return re(x,h.shape)},f={x:h},g={dims:l};return H.runKernelFunc(d,f,null,yf,g)}const Vc=G({reverse_:I9});function x9(r){const l=M(r,"x","rsqrt"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.rsqrt(l);return f([l]),g},h,null,bf)}const HI=G({rsqrt_:x9});function T9(r){const l=M(r,"x","sin"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sin(l);return f([l]),g},h,null,Sf)}const YI=G({sin_:T9});function A9(r){const l=M(r,"x","sinh"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sinh(l);return f([l]),g},h,null,If)}const qI=G({sinh_:A9});function v9(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(h=>{const d=r.shape[r.shape.length-1],f=r.size/d,g=r.as2D(f,d),S=h.fft(g);return S.reshape(r.shape)},l,null,j2)}const $u=G({fft_:v9});function N9(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(h=>{const d=r.shape[r.shape.length-1],f=r.size/d,g=re(r,[f,d]),S=h.ifft(g);return re(S,r.shape)},l,null,Q2)}const Hc=G({ifft_:N9});function C9(r){const l=r.shape[r.shape.length-1],h=r.size/l;let d;if(l<=2){const f=re(r,[h,l]);d=Hc(f)}else{const f=[h,2*(l-1)],g=re(Gc(r),[h,l]),S=re(_u(r),[h,l]),L=Vc(At(g,[0,1],[h,l-2]),1),x=ae(Vc(At(S,[0,1],[h,l-2]),1),ke(-1)),A=bn([g,L],1),O=bn([S,x],1),C=re(qi(A,O),[f[0],f[1]]);d=Hc(C)}if(d=Gc(d),r.rank===3&&r.shape[0]!==0){const f=d,g=r.shape[0];d=re(d,[g,d.shape[0]/g,d.shape[1]]),f.dispose()}return d}const jI=G({irfft_:C9});function IO(r,l,h=0){let d=[];if(typeof l=="number")Z(r.shape[h]%l===0,()=>"Number of splits must evenly divide the axis."),d=new Array(l).fill(r.shape[h]/l);else{const 
f=l.reduce((S,L)=>(L===-1&&(S+=1),S),0);Z(f<=1,()=>"There should be only one negative value in split array.");const g=l.indexOf(-1);if(g!==-1){const S=l.reduce((L,x)=>x>0?L+x:L);l[g]=r.shape[h]-S}Z(r.shape[h]===l.reduce((S,L)=>S+L),()=>"The sum of sizes must match the size of the axis dimension."),d=l}return d}function R9(r,l,h=0){const d=M(r,"x","split"),f=(L,x)=>{const A=ht(h,d.shape)[0],O=IO(d,l,A);return L.split(d,O,A)},g={x:d},S={numOrSizeSplits:l,axis:h};return H.runKernelFunc(f,g,null,Nf,S)}const uo=G({split_:R9});function O9(r,l){Z(r.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${r.dtype}`);let h=r.shape[r.shape.length-1];const d=r.size/h;let f;if(l!=null&&l0),te=r.shape.map(se=>se);te[r.shape.length-1]=l,f=At(r,ne,te),h=l}else if(l!=null&&l>h){const ne=r.shape.map(te=>te);ne[r.shape.length-1]=l-h,f=bn([r,Fs(ne)],r.shape.length-1),h=l}else f=r;const g=je(f),S=re(qi(f,g),[d,h]),L=$u(S),x=Math.floor(h/2)+1,A=Gc(L),O=_u(L),C=uo(A,[x,h-x],A.shape.length-1),$=uo(O,[x,h-x],O.shape.length-1),z=f.shape.slice();return z[f.shape.length-1]=x,re(qi(C[0],$[0]),z)}const Uu=G({rfft_:O9});function E9(r){const l=M(r,"x","sqrt"),h={x:l};return H.runKernelFunc((d,f)=>{const g=d.sqrt(l);return f([l]),g},h,null,Tf)}const gs=G({sqrt_:E9});function D9(r,l){let h=M(r,"a","squaredDifference"),d=M(l,"b","squaredDifference");[h,d]=mt(h,d),rt(h.shape,d.shape);const f=(L,x)=>{const A=L.squaredDifference(h,d);return x([h,d]),A},g={a:h,b:d},S={};return H.runKernelFunc(f,g,null,Cf,S)}const KI=G({squaredDifference_:D9});function k9(r,l){const h=M(r,"x","squeeze");return re(h,PR(h.shape,l).newShape)}const XI=G({squeeze_:k9});function F9(r,l=0){const h=Gf(r,"tensors","stack");if(Z(h.length>=1,()=>"Pass at least one tensor to tf.stack"),h.length===1)return ks(h[0],l);const d=h[0].rank,f=h[0].shape,g=h[0].dtype;Z(l<=d,()=>"Axis must be <= rank of the tensor"),h.forEach(L=>{Zt(f,L.shape,"All tensors passed to stack must have matching shapes"),Z(g===L.dtype,()=>"All tensors passed to stack must have matching dtypes")});const S=h.map(L=>ks(L,l));return bn(S,l)}const js=G({stack_:F9});function _9(r,l=0){const h=M(r,"x","step"),d={x:h},f={alpha:l};return H.runKernelFunc(g=>g.step(h,l),d,null,_f,f)}const pa=G({step_:_9});function ma(r,l,h){if(_c(r),l!=null&&l.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const d=wr(r,h);if(d.length!==2&&d.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(d.length===1&&l==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return ji(r,l,d,h)}function W9(r,l,h){const d=M(r,"x","unsortedSegmentSum"),f=M(l,"segmentIds","unsortedSegmentSum","int32");Z(Qt(h),()=>"numSegments must be of dtype int");const g={x:d,segmentIds:f},S={numSegments:h},L=(x,A)=>{const O=x.unsortedSegmentSum(d,f,h);return A([f]),O};return H.runKernelFunc(L,g,null,kf,S)}const JI=G({unsortedSegmentSum_:W9});function $9(r,l=0){const h=M(r,"x","unstack");Z(l>=-h.shape.length&&l`Axis = ${l} is not in [-${h.shape.length}, ${h.shape.length})`),l<0&&(l+=h.shape.length);const d={value:h},f={axis:l},g=S=>S.unstack(h,l);return H.runKernelFunc(g,d,null,Df,f)}const Bu=G({unstack_:$9});function U9(r,l="euclidean",h=null,d=!1){r=M(r,"x","norm");const f=xO(r,l,h);let g=f.shape;if(d){const S=ht(h,r.shape);g=ts(f.shape,S)}return re(f,g)}function xO(r,l,h=null){if(r.rank===0)return Pn(r);if(r.rank!==1&&h===null)return xO(re(r,[-1]),l,h);if(r.rank===1||typeof 
h=="number"||Array.isArray(h)&&h.length===1){if(l===1)return Fe(Pn(r),h);if(l===Infinity)return ha(Pn(r),h);if(l===-Infinity)return Xf(Pn(r),h);if(l==="euclidean"||l===2)return gs(Fe(da(Pn(r),ke(2,"int32")),h));throw new Error(`Error in norm: invalid ord value: ${l}`)}if(Array.isArray(h)&&h.length===2){if(l===1)return ha(Fe(Pn(r),h[0]),h[1]-1);if(l===Infinity)return ha(Fe(Pn(r),h[1]),h[0]);if(l===-Infinity)return Xf(Fe(Pn(r),h[1]),h[0]);if(l==="fro"||l==="euclidean")return gs(Fe(ut(r),h));throw new Error(`Error in norm: invalid ord value: ${l}`)}throw new Error(`Error in norm: invalid axis: ${h}`)}const Zf=G({norm_:U9});function TO(r){return Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2))))}function Qf(r,l,h){const d=1-r%2,f=new Float32Array(r);for(let g=0;g`Error in conv2dDerFilter: input must be rank 4, but got shape ${L.shape}.`),Z(x.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${x.shape}.`),Z(h.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${h}.`);const A=g==="NHWC"?L.shape[3]:L.shape[1],O=g==="NHWC"?x.shape[3]:x.shape[1];Z(A===h[2],()=>`Error in conv2dDerFilter: depth of input ${A}) must match input depth in filter (${h[2]}.`),Z(O===h[3],()=>`Error in conv2dDerFilter: depth of dy (${O}) must match output depth for filter (${h[3]}).`),S!=null&&Z(Qt(f),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${S} but got pad ${f}.`);const C=ne=>{const te=1,se=zc(g),fe=Lr(L.shape,h,d,te,f,S,!1,se);return ne.conv2dDerFilter(L,x,fe)},$={x:L,dy:x},z={strides:d,pad:f,dataFormat:g,dimRoundingMode:S};return H.runKernelFunc(C,$,null,D2,z)}const eg=G({conv2DBackpropFilter_:B9});function M9(r,l,h,d){let f=r;r.rank===3&&(f=re(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let g=l;g.rank===3&&(g=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const S=x=>x.depthwiseConv2DDerFilter(f,g,d),L={x:f,dy:g};return H.runKernelFunc(S,L,null,U2)}const AO=G({depthwiseConv2dNativeBackpropFilter_:M9});function P9(r,l,h,d){let f=l,g=!1;l.rank===3&&(g=!0,f=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const S=A=>A.depthwiseConv2DDerInput(f,h,d),L={dy:f},x=H.runKernelFunc(S,L,null,B2);return g?re(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const vO=G({depthwiseConv2dNativeBackpropInput_:P9});function z9(r){return Qf(r,.54,.46)}const NO=G({hammingWindow_:z9});function G9(r){return Qf(r,.5,.5)}const tg=G({hannWindow_:G9});function V9(r,l,h,d=!1,f=0){let g=0;const S=[];for(;g+l<=r.size;)S.push(At(r,g,l)),g+=h;if(d)for(;g`Error in cropAndResize: image must be rank 4,but got rank ${S.rank}.`),Z(L.rank===2&&L.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${A},4] but had shape ${L.shape}.`),Z(x.rank===1&&x.shape[0]===A,()=>`Error in cropAndResize: boxInd must be have size [${A}] but had shape ${L.shape}.`),Z(d.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${d.length}.`),Z(d[0]>=1&&d[1]>=1,()=>`cropSize must be atleast [1,1], but was ${d}`),Z(f==="bilinear"||f==="nearest",()=>`method must be bilinear or nearest, but was ${f}`);const O=ne=>ne.cropAndResize(S,L,x,d,f,g),C={image:S,boxes:L,boxInd:x},$={method:f,extrapolationValue:g,cropSize:d},z=H.runKernelFunc(O,C,null,W2,$);return z}const RO=G({cropAndResize_:Y9});function q9(r){const l=M(r,"image","flipLeftRight","float32");Z(l.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${l.rank}.`);const h={image:l},d=H.runKernel(X2,h,{});return d}const OO=G({flipLeftRight_:q9});function j9(r,l,h=0,d=.5){const 
f=M(r,"image","rotateWithOffset","float32");Z(f.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${f.rank}.`);const g={image:f},S={radians:l,fillValue:h,center:d},L=H.runKernel($R,g,S);return L}const EO=G({rotateWithOffset_:j9});function Ks(r,l,h,d,f,g){d==null&&(d=.5),f==null&&(f=Number.NEGATIVE_INFINITY),g==null&&(g=0);const S=r.shape[0];return h=Math.min(h,S),Z(0<=d&&d<=1,()=>`iouThreshold must be in [0, 1], but was '${d}'`),Z(r.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${r.rank}'`),Z(r.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`),Z(l.rank===1,()=>"scores must be a 1D tensor"),Z(l.shape[0]===S,()=>`scores has incompatible shape with boxes. Expected ${S}, but was ${l.shape[0]}`),Z(0<=g&&g<=1,()=>`softNmsSigma must be in [0, 1], but was '${g}'`),{maxOutputSize:h,iouThreshold:d,scoreThreshold:f,softNmsSigma:g}}function K9(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),L=Ks(g,S,h,d,f);h=L.maxOutputSize,d=L.iouThreshold,f=L.scoreThreshold;const x={maxOutputSize:h,iouThreshold:d,scoreThreshold:f};return H.runKernelFunc(A=>A.nonMaxSuppression(g,S,h,d,f),{boxes:g,scores:S},null,yR,x)}const DO=G({nonMaxSuppression_:K9});function kO(r,l,h){const d=X9(r,l,h),f=d<0?-(d+1):d;r.splice(f,0,l)}function X9(r,l,h){return Z9(r,l,h||J9)}function J9(r,l){return r>l?1:r>>1);const L=h(l,r[g]);L>0?d=g+1:(f=g,S=!L)}return S?d:-d-1}function FO(r,l,h,d,f){return ZI(r,l,h,d,f,0).selectedIndices}function _O(r,l,h,d,f,g){return ZI(r,l,h,d,f,0,!1,g,!0)}function WO(r,l,h,d,f,g){return ZI(r,l,h,d,f,g,!0)}function ZI(r,l,h,d,f,g,S=!1,L=!1,x=!1){const A=[];for(let se=0;sef&&A.push({score:l[se],boxIndex:se,suppressBeginIndex:0});A.sort($O);const O=g>0?-.5/g:0,C=[],$=[];for(;C.length0;){const se=A.pop(),{score:fe,boxIndex:de,suppressBeginIndex:Ae}=se;if(fe=Ae;--Me){const Ke=Q9(r,de,C[Me]);if(Ke>=d){xe=!0;break}if(se.score=se.score*eZ(d,O,Ke),se.score<=f)break}se.suppressBeginIndex=C.length,xe||(se.score===fe?(C.push(de),$.push(se.score)):se.score>f&&kO(A,se,$O))}const z=C.length,ne=h-z;L&&ne>0&&(C.push(...new Array(ne).fill(0)),$.push(...new Array(ne).fill(0)));const te={selectedIndices:ho(C,"int32")};return S&&(te.selectedScores=ho($,"float32")),x&&(te.validOutputs=ke(z,"int32")),te}function Q9(r,l,h){const d=r.subarray(l*4,l*4+4),f=r.subarray(h*4,h*4+4),g=Math.min(d[0],d[2]),S=Math.min(d[1],d[3]),L=Math.max(d[0],d[2]),x=Math.max(d[1],d[3]),A=Math.min(f[0],f[2]),O=Math.min(f[1],f[3]),C=Math.max(f[0],f[2]),$=Math.max(f[1],f[3]),z=(L-g)*(x-S),ne=(C-A)*($-O);if(z<=0||ne<=0)return 0;const te=Math.max(g,A),se=Math.max(S,O),fe=Math.min(L,C),de=Math.min(x,$),Ae=Math.max(fe-te,0)*Math.max(de-se,0);return Ae/(z+ne-Ae)}function eZ(r,l,h){const d=Math.exp(l*h*h);return h<=r?d:0}function $O(r,l){return r.score-l.score||r.score===l.score&&l.boxIndex-r.boxIndex}async function tZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),L=Ks(g,S,h,d,f);h=L.maxOutputSize,d=L.iouThreshold,f=L.scoreThreshold;const x=await Promise.all([g.data(),S.data()]),A=x[0],O=x[1],C=FO(A,O,h,d,f);return g!==r&&g.dispose(),S!==l&&S.dispose(),C}const UO=tZ;function nZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=0){const S=M(r,"boxes","nonMaxSuppression"),L=M(l,"scores","nonMaxSuppression"),x=Ks(S,L,h,d,f,g);h=x.maxOutputSize,d=x.iouThreshold,f=x.scoreThreshold,g=x.softNmsSigma;const 
A={boxes:S,scores:L},O={maxOutputSize:h,iouThreshold:d,scoreThreshold:f,softNmsSigma:g},C=H.runKernel(wR,A,O);return{selectedIndices:C[0],selectedScores:C[1]}}const BO=G({nonMaxSuppressionWithScore_:nZ});async function sZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=0){const S=M(r,"boxes","nonMaxSuppressionAsync"),L=M(l,"scores","nonMaxSuppressionAsync"),x=Ks(S,L,h,d,f,g);h=x.maxOutputSize,d=x.iouThreshold,f=x.scoreThreshold,g=x.softNmsSigma;const A=await Promise.all([S.data(),L.data()]),O=A[0],C=A[1],$=WO(O,C,h,d,f,g);return S!==r&&S.dispose(),L!==l&&L.dispose(),$}const MO=sZ;function iZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=!1){const S=M(r,"boxes","nonMaxSuppression"),L=M(l,"scores","nonMaxSuppression"),x=Ks(S,L,h,d,f,null),A=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,$={boxes:S,scores:L},z={maxOutputSize:A,iouThreshold:O,scoreThreshold:C,padToMaxOutputSize:g},ne=H.runKernel(bR,$,z);return{selectedIndices:ne[0],validOutputs:ne[1]}}const PO=G({nonMaxSuppressionPadded_:iZ});async function rZ(r,l,h,d=.5,f=Number.NEGATIVE_INFINITY,g=!1){const S=M(r,"boxes","nonMaxSuppressionAsync"),L=M(l,"scores","nonMaxSuppressionAsync"),x=Ks(S,L,h,d,f,null),A=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,[$,z]=await Promise.all([S.data(),L.data()]),ne=_O($,z,A,O,C,g);return S!==r&&S.dispose(),L!==l&&L.dispose(),ne}const zO=rZ;function oZ(r,l,h=!1){const d=M(r,"images","resizeBilinear");Z(d.rank===3||d.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${d.rank}.`),Z(l.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${l}.`);let f=d,g=!1;d.rank===3&&(g=!0,f=re(d,[1,d.shape[0],d.shape[1],d.shape[2]]));const[S,L]=l,x=($,z)=>(z([f]),$.resizeBilinear(f,S,L,h)),A={images:f},O={alignCorners:h,size:l},C=H.runKernelFunc(x,A,null,gf,O);return g?re(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const GO=G({resizeBilinear_:oZ});function aZ(r,l,h=!1){const d=M(r,"images","resizeNearestNeighbor");Z(d.rank===3||d.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${d.rank}.`),Z(l.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${l}.`),Z(d.dtype==="float32"||d.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let f=d,g=!1;d.rank===3&&(g=!0,f=re(d,[1,d.shape[0],d.shape[1],d.shape[2]]));const[S,L]=l,x={images:f},A={alignCorners:h,size:l},O=($,z)=>(z([f]),$.resizeNearestNeighbor(f,S,L,h)),C=H.runKernelFunc(O,x,null,ff,A);return g?re(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const VO=G({resizeNearestNeighbor_:aZ});function cZ(r,l,h){Z(l%1===0,()=>`bandPart(): numLower must be an integer, got ${l}.`),Z(h%1===0,()=>`bandPart(): numUpper must be an integer, got ${h}.`);const d=M(r,"a","bandPart");Z(d.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${d.rank}.`);const f=d.shape,[g,S]=d.shape.slice(-2);if(!(l<=g))throw new Error(`bandPart(): numLower (${l}) must not be greater than the number of rows (${g}).`);if(!(h<=S))throw new Error(`bandPart(): numUpper (${h}) must not be greater than the number of columns (${S}).`);l<0&&(l=g),h<0&&(h=S);const L=re(Jf(0,g,1,"int32"),[-1,1]),x=Jf(0,S,1,"int32"),A=Be(L,x),O=ua(Ir(A,ke(+l,"int32")),Sr(A,ke(-h,"int32"))),C=Fs([g,S],d.dtype);return re(js(Bu(re(d,[-1,g,S])).map($=>zn(O,$,C))),f)}const HO=G({bandPart_:cZ});function lZ(r){let l;if(Array.isArray(r)){l=!1,Z(r!=null&&r.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const f=r[0].shape[0];for(let g=1;g`Gram-Schmidt: Non-unique lengths found in the input vectors: 
(${r[g].shape[0]} vs. ${f})`)}else l=!0,r=uo(r,r.shape[0],0).map(f=>XI(f,[0]));Z(r.length<=r[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);const h=[],d=r;for(let f=0;f{let g=d[f];if(f>0)for(let S=0;S=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`),r.rank===2)return qO(r,l);{const h=r.shape.slice(0,r.shape.length-2).reduce((x,A)=>x*A),d=Bu(re(r,[h,r.shape[r.shape.length-2],r.shape[r.shape.length-1]]),0),f=[],g=[];d.forEach(x=>{const[A,O]=qO(x,l);f.push(A),g.push(O)});const S=re(js(f,0),r.shape),L=re(js(g,0),r.shape);return[S,L]}}function qO(r,l=!1){return H.tidy(()=>{Z(r.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);const h=r.shape[0],d=r.shape[1];let f=EI(h),g=bi(r);const S=ma([[1]],[1,1]);let L=bi(S);const x=h>=d?d:h;for(let A=0;A{const z=At(g,[A,A],[h-A,1]),ne=Zf(z),te=At(g,[A,A],[1,1]),se=zn(wi(te,0),ma([[-1]]),ma([[1]])),fe=Be(te,ae(se,ne)),de=ze(z,fe);de.shape[0]===1?L=bi(S):L=bn([S,At(de,[1,0],[de.shape[0]-1,de.shape[1]])],0);const Ae=yt(ze(pn(se,fe),ne)),xe=At(g,[A,0],[h-A,d]),Me=ae(Ae,L),Ke=Wt(L);if(A===0)g=Be(xe,pn(Me,pn(Ke,xe)));else{const Kt=Be(xe,pn(Me,pn(Ke,xe)));g=bn([At(g,[0,0],[A,d]),Kt],0)}const wt=Wt(Me),$t=At(f,[0,A],[h,f.shape[1]-A]);if(A===0)f=Be($t,pn(pn($t,L),wt));else{const Kt=Be($t,pn(pn($t,L),wt));f=bn([At(f,[0,0],[h,A]),Kt],1)}return[L,g,f]}),mO([O,C,$])}return!l&&h>d&&(f=At(f,[0,0],[h,d]),g=At(g,[0,0],[d,d])),[f,g]})}const jO=G({qr_:hZ});var jt;(function(r){r[r.NONE=0]="NONE",r[r.MEAN=1]="MEAN",r[r.SUM=2]="SUM",r[r.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(jt||(jt={}));function uZ(r,l,h=jt.SUM_BY_NONZERO_WEIGHTS){const d=M(r,"losses","computeWeightedLoss");let f=null;l!=null&&(f=M(l,"weights","computeWeightedLoss"));const g=f==null?d:ae(d,f);if(h===jt.NONE)return g;if(h===jt.SUM)return Fe(g);if(h===jt.MEAN){if(f==null)return MI(g);{const S=d.size/f.size,L=ze(Fe(g),Fe(f));return S>1?ze(L,ke(S)):L}}if(h===jt.SUM_BY_NONZERO_WEIGHTS){if(f==null)return ze(Fe(g),ke(d.size));{const S=ae(f,Ki(d.shape)),L=Le(Fe(zI(S,ke(0))),"float32");return ze(Fe(g),L)}}throw Error(`Unknown reduction: ${h}`)}const An=G({computeWeightedLoss_:uZ});function dZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","absoluteDifference"),g=M(l,"predictions","absoluteDifference");let S=null;h!=null&&(S=M(h,"weights","absoluteDifference")),Zt(f.shape,g.shape,"Error in absoluteDifference: ");const L=Pn(Be(f,g));return An(L,S,d)}const KO=G({absoluteDifference_:dZ});function pZ(r,l,h,d,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","cosineDistance"),S=M(l,"predictions","cosineDistance");let L=null;d!=null&&(L=M(d,"weights","cosineDistance")),Zt(g.shape,S.shape,"Error in cosineDistance: ");const x=ke(1),A=Be(x,Fe(ae(g,S),h,!0));return An(A,L,f)}const XO=G({cosineDistance_:pZ});function mZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){let f=M(r,"labels","hingeLoss");const g=M(l,"predictions","hingeLoss");let S=null;h!=null&&(S=M(h,"weights","hingeLoss")),Zt(f.shape,g.shape,"Error in hingeLoss: ");const L=ke(1);f=Be(ae(ke(2),f),L);const x=Wu(Be(L,ae(f,g)));return An(x,S,d)}const JO=G({hingeLoss_:mZ});function fZ(r,l,h,d=1,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","huberLoss"),S=M(l,"predictions","huberLoss");let L=null;h!=null&&(L=M(h,"weights","huberLoss")),Zt(g.shape,S.shape,"Error in huberLoss: ");const x=ke(d),A=Pn(Be(S,g)),O=PI(A,x),C=Be(A,O),$=St(ae(ke(.5),ut(O)),ae(x,C));return An($,L,f)}const ZO=G({huberLoss_:fZ});function 
gZ(r,l,h,d=1e-7,f=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","logLoss"),S=M(l,"predictions","logLoss");let L=null;h!=null&&(L=M(h,"weights","logLoss")),Zt(g.shape,S.shape,"Error in logLoss: ");const x=ke(1),A=ke(d),O=yt(ae(g,lo(St(S,A)))),C=ae(Be(x,g),lo(St(Be(x,S),A))),$=Be(O,C);return An($,L,f)}const QO=G({logLoss_:gZ});function yZ(r,l,h,d=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","meanSquaredError"),g=M(l,"predictions","meanSquaredError");let S=null;h!=null&&(S=M(h,"weights","meanSquaredError")),Zt(f.shape,g.shape,"Error in meanSquaredError: ");const L=KI(f,g);return An(L,S,d)}const e1=G({meanSquaredError_:yZ});function bZ(r,l){const h=M(r,"labels","sigmoidCrossEntropyWithLogits"),d=M(l,"logits","sigmoidCrossEntropyWithLogits");Zt(h.shape,d.shape,"Error in sigmoidCrossEntropyWithLogits: ");const f=Wu(d),g=ae(d,h),S=$I(Gn(yt(Pn(d))));return St(Be(f,g),S)}function wZ(r,l,h,d=0,f=jt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"multiClassLabels","sigmoidCrossEntropy");const S=M(l,"logits","sigmoidCrossEntropy");let L=null;if(h!=null&&(L=M(h,"weights","sigmoidCrossEntropy")),Zt(g.shape,S.shape,"Error in sigmoidCrossEntropy: "),d>0){const A=ke(d),O=ke(1),C=ke(.5);g=St(ae(g,Be(O,A)),ae(C,A))}const x=bZ(g,S);return An(x,L,f)}const t1=G({sigmoidCrossEntropy_:wZ});function LZ(r,l,h=-1){if(h===-1&&(h=l.rank-1),h!==l.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${l.rank} and dim was ${h}`);const d=Kf((f,g,S)=>{const L=!0,x=UI(g,[h],L),A=Be(Le(g,"float32"),x);S([f,A]);const O=yt(ae(A,f)),C=Fe(O,[h]),$=(z,ne)=>{const[te,se]=ne,fe=ts(z.shape,[h]);return[ae(re(z,fe),Be(Le(te,"float32"),Gn(se))),ae(re(z,fe),Be(Gn(se),Le(te,"float32")))]};return{value:C,gradFunc:$}});return d(r,l)}function SZ(r,l,h,d=0,f=jt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"onehotLabels","softmaxCrossEntropy");const S=M(l,"logits","softmaxCrossEntropy");let L=null;if(h!=null&&(L=M(h,"weights","softmaxCrossEntropy")),Zt(g.shape,S.shape,"Error in softmaxCrossEntropy: "),d>0){const A=ke(d),O=ke(1),C=ke(g.shape[1]);g=St(ae(g,Be(O,A)),ze(A,C))}const x=LZ(g,S);return An(x,L,f)}const n1=G({softmaxCrossEntropy_:SZ});const wAe={fft:$u,ifft:Hc,rfft:Uu,irfft:jI},TAe={hammingWindow:NO,hannWindow:tg,frame:ng,stft:CO},s1={flipLeftRight:OO,resizeNearestNeighbor:VO,resizeBilinear:GO,rotateWithOffset:EO,cropAndResize:RO,nonMaxSuppression:DO,nonMaxSuppressionAsync:UO,nonMaxSuppressionWithScore:BO,nonMaxSuppressionWithScoreAsync:MO,nonMaxSuppressionPadded:PO,nonMaxSuppressionPaddedAsync:zO},BAe={bandPart:HO,gramSchmidt:YO,qr:jO},KAe={absoluteDifference:KO,computeWeightedLoss:An,cosineDistance:XO,hingeLoss:JO,huberLoss:ZO,logLoss:QO,meanSquaredError:e1,sigmoidCrossEntropy:t1,softmaxCrossEntropy:n1};const i1=1.7580993408473768,r1=1.0507009873554805;const o1={kernelName:Bm,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,pa(Le(h,"float32"),-1))}}};const a1={kernelName:f2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=ut(Le(h,"float32")),f=gs(Be(ke(1),d));return yt(ze(r,f))}}}};const c1={kernelName:g2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=gs(Be(ut(Le(h,"float32")),1));return ze(r,d)}}}};const l1={kernelName:kc,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{let L=r;const x=vt(h.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,h.shape)},S=()=>{let L=r;const x=vt(d.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,d.shape)};return{a:g,b:S}}};const 
h1={kernelName:y2,saveAllInputs:!0,gradFunc:(r,l)=>{const h={};return l.forEach((d,f)=>{h[f]=()=>r.clone()}),h}};const u1={kernelName:b2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>je(h)}}};const d1={kernelName:w2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>je(h)}}};const p1={kernelName:L2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,gs(Be(ke(1),ut(Le(h,"float32")))))}}};const m1={kernelName:S2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=gs(St(ke(1),ut(Le(h,"float32"))));return ze(r,d)}}}};const f1={kernelName:T2,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=St(ut(h),ut(d));let x=ae(r,ze(d,L));const A=vt(h.shape,f);return A.length>0&&(x=Fe(x,A)),re(x,h.shape)},S=()=>{const L=St(ut(h),ut(d));let x=yt(ae(r,ze(h,L)));const A=vt(d.shape,f);return A.length>0&&(x=Fe(x,A)),re(x,d.shape)};return{a:g,b:S}}};const g1={kernelName:I2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,St(ut(Le(h,"float32")),1))}}};const y1={kernelName:x2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,Be(ke(1),ut(Le(h,"float32"))))}}};function IZ(r,l,h,d,f=[1,1,1],g,S){const L=M(r,"dy","avgPool3dBackprop"),x=M(l,"input","avgPool3dBackprop");let A=L,O=x,C=!1;x.rank===4&&(C=!0,A=re(L,[1,L.shape[0],L.shape[1],L.shape[2],L.shape[3]]),O=re(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]])),Z(A.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${A.rank}.`),Z(O.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${O.rank}.`),Z(co(d,f),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${d} and dilations '${f}'`),S!=null&&Z(Qt(g),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const $=se=>{const fe=qf(O.shape,h,d,f,g,S);return se.avgPool3dBackprop(A,O,fe)},z={dy:A,input:O},ne={filterSize:h,strides:d,dilations:f,pad:g,dimRoundingMode:S},te=H.runKernelFunc($,z,null,C2,ne);return C?re(te,[te.shape[1],te.shape[2],te.shape[3],te.shape[4]]):te}const b1=G({avgPool3dBackprop_:IZ});const w1={kernelName:N2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{filterSize:f,strides:g,dilations:S,pad:L,dimRoundingMode:x}=h,A=S==null?[1,1,1]:S;return{x:()=>b1(r,d,f,g,A,L,x)}}};function xZ(r,l,h,d,f){const g=M(r,"dy","avgPoolBackprop"),S=M(l,"input","avgPoolBackprop");Z(S.rank===g.rank,()=>`Rank of input (${S.rank}) does not match rank of dy (${g.rank})`);let L=S,x=g,A=!1;S.rank===3&&(A=!0,L=re(S,[1,S.shape[0],S.shape[1],S.shape[2]]),x=re(g,[1,g.shape[0],g.shape[1],g.shape[2]])),Z(x.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${x.rank}.`),Z(L.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${L.rank}.`);const O=ne=>{const te=Yf(L.shape,h,d,1,f);return ne.avgPoolBackprop(x,L,te)},C={dy:x,input:L},$={filterSize:h,strides:d,pad:f},z=H.runKernelFunc(O,C,null,v2,$);return A?re(z,[z.shape[1],z.shape[2],z.shape[3]]):z}const L1=G({avgPoolBackprop_:xZ});const S1={kernelName:A2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{filterSize:f,strides:g,pad:S}=h;return{x:()=>L1(r,d,f,g,S)}}};const I1={kernelName:Mm,inputsToSave:["a","b"],gradFunc:(r,l,h)=>{const[d,f]=l,{transposeA:g,transposeB:S}=h;return!g&&!S?{a:()=>pn(r,f,!1,!0),b:()=>pn(d,r,!0,!1)}:!g&&S?{a:()=>pn(r,f,!1,!1),b:()=>pn(r,d,!0,!1)}:g&&!S?{a:()=>pn(f,r,!1,!0),b:()=>pn(d,r,!1,!1)}:{a:()=>pn(f,r,!0,!0),b:()=>pn(r,d,!0,!0)}}};const 
x1={kernelName:Pm,gradFunc:(r,l,h)=>{const{blockShape:d,crops:f}=h;return{x:()=>VI(r,d,f)}}};const T1={kernelName:zm,gradFunc:(r,l,h)=>{const d=h,f=d.inputShape,g=d.shape,S=Array.from(g);for(let x=f.length-1;x>=0;x--)if(f[x]===g[x])S[x]=1;else if(f[x]!==1)throw new Error(`broadcastTo(): [${f}] cannot be broadcast to [${g}].`);const L=[];for(let x=0;x1&&L.push(x);return{x:()=>Fe(r,L,!0)}}};const A1={kernelName:Fc,gradFunc:r=>({x:()=>r.clone()})};const v1={kernelName:R2,gradFunc:r=>({x:()=>je(r)})};const N1={kernelName:O2,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{clipValueMin:f,clipValueMax:g}=h;return{x:()=>zn(ua(Sr(d,f),Ir(d,g)),r,je(r))}}};const C1={kernelName:Gm,saveAllInputs:!0,gradFunc:(r,l,h)=>{const d=l.map(x=>x.shape),{axis:f}=h,g=ht(f,l[0].shape)[0],S=d.map(x=>x[g]),L=uo(r,S,g);return L.map(x=>()=>x)}};const R1={kernelName:Vm,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,{dilations:g,strides:S,pad:L,dataFormat:x}=h;return Z(ao(g),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${g}'`),{x:()=>wO(d.shape,r,f,S,L,x),filter:()=>eg(d,r,f.shape,S,L,x)}}};const O1={kernelName:Hm,inputsToSave:["dy","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,{strides:g,pad:S,dataFormat:L,dimRoundingMode:x}=h;return{dy:()=>NI(r,f,g,S,L,1,x),filter:()=>eg(r,d,f.shape,g,S,L,x)}}};function TZ(r,l,h,d,f){let g=r;r.rank===4&&(g=re(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]]));let S=l;S.rank===4&&(S=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),Z(g.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${g.shape}.`),Z(S.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${S.shape}.`),Z(h.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${h}.`),Z(g.shape[4]===h[3],()=>`Error in conv3dDerFilter: depth of input ${g.shape[4]}) must match input depth in filter (${h[3]}.`),Z(S.shape[4]===h[4],()=>`Error in conv3dDerFilter: depth of dy (${S.shape[4]}) must match output depth for filter (${h[4]}).`);const L=O=>{const C=1,$=ku(g.shape,h,d,C,f);return O.conv3dDerFilter(g,S,$)},x={x:g,y:S},A={strides:d,pad:f};return H.runKernelFunc(L,x,null,F2,A)}const E1=G({conv3DBackpropFilter_:TZ});const D1={kernelName:k2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const{dilations:d,strides:f,pad:g}=h;Z(ao(d),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${d}'`);const[S,L]=l;return{x:()=>LO(S.shape,r,L,f,g),filter:()=>E1(S,r,L.shape,f,g)}}};const k1={kernelName:Ym,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(yt(YI(Le(h,"float32"))),r)}}};const F1={kernelName:qm,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(qI(Le(h,"float32")),r)}}};const _1={kernelName:jm,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{axis:f,exclusive:g,reverse:S}=h;return{x:()=>{const L=fs([f],d.rank);let x=RI(r,f,g,!S);return L!=null&&(x=Wt(x,L)),x}}}};const W1={kernelName:$2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const{dilations:d,strides:f,pad:g,dimRoundingMode:S}=h,L=d==null?[1,1]:d;Z(ao(L),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. 
Got dilations '${L}'`);const[x,A]=l;Z(x.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${x.rank}.`),Z(A.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${A.rank}.`),Z(x.shape[3]===A.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${x.shape[3]}) must match the inChannels dimension in filter ${A.shape[2]}.`),Z(co(f,L),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. Got strides ${f} and dilations '${L}'.`),S!=null&&Z(Qt(g),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const O=Lr(x.shape,A.shape,f,L,g,S,!0);return{x:()=>vO(x.shape,r,A,O),filter:()=>AO(x,r,A.shape,O)}}};const $1={kernelName:M2,inputsToSave:["x","filter"],gradFunc:(r,l,h)=>{const[d,f]=l,g={x:d,filter:f,dy:r},S={x:d,filter:f,dy:r};return{x:()=>H.runKernel(P2,g,h),filter:()=>H.runKernel(z2,S,h)}}};const U1={kernelName:Km,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ze(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{let L=ae(r,Le(h,"float32"));const x=vt(d.shape,f);x.length>0&&(L=re(Fe(L,x),d.shape));const A=ut(d);return yt(ze(L,Le(A,"float32")))};return{a:g,b:S}}};const B1={kernelName:G2,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l,d=g=>g.eluDer(r,h),f={dy:r,y:h};return{x:()=>H.runKernelFunc(d,f,null,V2)}}};const M1={kernelName:H2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l,d=ae(Gn(yt(ut(h))),2/Math.sqrt(Math.PI));return{x:()=>ae(r,d)}}};const P1={kernelName:Xm,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,h)}}};const z1={kernelName:q2,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,Gn(h))}}};const G1={kernelName:Jm,gradFunc:r=>({x:()=>je(r)})};const V1={kernelName:Zm,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ze(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{let L=ae(r,Le(h,"float32"));const x=vt(d.shape,f);x.length>0&&(L=re(Fe(L,x),d.shape));const A=ut(d);return yt(ze(L,Le(A,"float32")))};return{a:g,b:S}}};const H1={kernelName:J2,inputsToSave:["x","mean","variance","scale"],gradFunc:(r,l,h)=>{const{varianceEpsilon:d}=h,[f,g,S,L]=l,x=L==null?ke(1):L,A=vt(g.shape,f.shape),O=[];if(g.rank===1){for(let xe=0;xeg.rank===1?re(ae(ae(r,la(re(z,[1,1,1,g.shape[0]]),O)),x),f.shape):re(ae(ae(r,z),x),f.shape),se=()=>{let xe=ae(ae(z,ke(-1)),$);return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)},fe=()=>{let xe=ae(ae(ne,C),$);return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)},de=()=>{const xe=ae(C,z);let Me=ae(r,xe);return g.rank===1&&(Me=Fe(Me,A)),re(Me,g.shape)},Ae=()=>{let xe=r;return g.rank===1&&(xe=Fe(xe,A)),re(xe,g.shape)};return{x:te,mean:se,variance:fe,scale:de,offset:Ae}}};const j1={kernelName:Qm,inputsToSave:["x","indices"],gradFunc:(r,l,h)=>{const[d,f]=l,{axis:g}=h,S=ht(g,d.shape)[0],L=()=>{const x=d.shape,A=f.size,O=x.slice(0,S),C=O.length,$=x.slice(g,x.length).slice(1),z=$.length,ne=Y1(0,C),te=Y1(C+1,C+1+z),se=q1([O,[A],$]),fe=re(r,se),de=re(f,[A]),Ae=q1([[C],ne,te]),xe=Wt(fe,Ae);let Me=JI(xe,de,d.shape[S]);const Ke=Mc(Ae);return Me=Wt(Me,Ke),Me};return{x:L,indices:()=>f}}};function Y1(r,l){const h=[];for(let d=r;d{const[h,d]=l;return{a:()=>je(h),b:()=>je(d)}}};const X1={kernelName:tf,gradFunc:r=>({x:()=>Le(r,"float32")})};const J1={kernelName:tR,gradFunc:r=>({x:()=>je(r)})};const 
Z1={kernelName:nR,gradFunc:r=>({x:()=>je(r)})};const Q1={kernelName:sR,gradFunc:r=>({x:()=>je(r)})};const eE={kernelName:sf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,St(h,1))}}};const tE={kernelName:nf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,Le(h,"float32"))}}};const nE={kernelName:cR,inputsToSave:[],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d]=l,{axis:f}=h;return{logits:()=>{const g=!0,S=Gn(d);return Be(r,ae(Fe(r,f,g),S))}}}};function AZ(r,l,h,d=5,f=1,g=1,S=.5){const L=O=>O.LRNGrad(h,r,l,d,f,g,S),x={x:r,y:l,dy:h},A={depthRadius:d,bias:f,alpha:g,beta:S};return H.runKernelFunc(L,x,null,hR,A)}const sE=G({localResponseNormalizationBackprop_:AZ});const iE={kernelName:lR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{depthRadius:g,bias:S,alpha:L,beta:x}=h;return{x:()=>sE(d,f,r,g,S,L,x)}}};function sg(r,l,h,d,f){return l.rank{const g=ae(r,Le(OI(h,l),r.dtype));return f==null?g:Wt(g,f)}}}const QI={kernelName:rf,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const d=h,{reductionIndices:f}=d,[g,S]=l,L=ht(f,g.shape),x=fs(L,g.rank),A=sg(r,S,g,L,x);return{x:()=>{let O=A.x();return x!=null&&(O=Wt(O)),O}}}};const rE={kernelName:of,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=()=>ae(r,Le(Sr(h,d),"float32")),g=()=>ae(r,Le(WI(h,d),"float32"));return{a:f,b:g}}};function vZ(r,l,h,d,f,g=[1,1,1],S,L){const x=M(r,"dy","maxPool3dBackprop"),A=M(l,"input","maxPool3dBackprop"),O=M(h,"output","maxPool3dBackprop");let C=x,$=A,z=O,ne=!1;A.rank===4&&(ne=!0,C=re(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]]),$=re(A,[1,A.shape[0],A.shape[1],A.shape[2],A.shape[3]]),z=re(O,[1,O.shape[0],O.shape[1],O.shape[2],O.shape[3]])),Z(C.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),Z($.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${$.rank}.`),Z(z.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${z.rank}.`),Z(co(f,g),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. 
Got strides ${f} and dilations '${g}'`),L!=null&&Z(Qt(S),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${L} but got pad ${S}.`);const te=Ae=>{const xe=qf($.shape,d,f,g,S,L);return Ae.maxPool3dBackprop(C,$,z,xe)},se={dy:C,input:$,output:z},fe={filterSize:d,strides:f,dilations:g,pad:S,dimRoundingMode:L},de=H.runKernelFunc(te,se,null,mR,fe);return ne?re(de,[de.shape[1],de.shape[2],de.shape[3],de.shape[4]]):de}const oE=G({maxPool3dBackprop_:vZ});const aE={kernelName:pR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{filterSize:g,strides:S,dilations:L,pad:x,dimRoundingMode:A}=h,O=L==null?[1,1,1]:L;return{x:()=>oE(r,d,f,g,S,O,x,A)}}};function NZ(r,l,h,d,f,g,S){const L=M(r,"dy","maxPoolBackprop"),x=M(l,"input","maxPoolBackprop"),A=M(h,"output","maxPoolBackprop");Z(x.rank===L.rank,()=>`Rank of input (${x.rank}) does not match rank of dy (${L.rank})`),Z(L.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${L.rank}.`),Z(x.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${x.rank}.`),S!=null&&Z(Qt(g),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${g}.`);const O=z=>{const ne=Yf(x.shape,d,f,1,g,S);return z.maxPoolBackprop(L,x,A,ne)},C={dy:L,input:x,output:A},$={filterSize:d,strides:f,pad:g,dimRoundingMode:S};return H.runKernelFunc(O,C,null,dR,$)}const cE=G({maxPoolBackprop_:NZ});const lE={kernelName:uR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d,f]=l,{filterSize:g,strides:S,pad:L}=h;return{x:()=>cE(r,d,f,g,S,L)}}};const hE={kernelName:af,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,h)=>{const d=h,{axis:f}=d,[g,S]=l,L=ht(f,g.shape),x=fs(L,g.rank),A=sg(r,S,g,L,x);return{x:()=>{let O=A.x();return x!=null&&(O=Wt(O)),O}}}};const uE={kernelName:cf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=()=>ae(r,Le(Ir(h,d),"float32")),g=()=>ae(r,Le(wi(h,d),"float32"));return{a:f,b:g}}};const dE={kernelName:fR,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=vt(h.shape,f);return L.length>0?re(Fe(r,L),h.shape):r},S=()=>{const L=ae(r,yt(kI(ze(h,d)))),x=vt(d.shape,f);return x.length>0?re(Fe(L,x),d.shape):L};return{a:g,b:S}}};const pE={kernelName:lf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{const L=ae(r,Le(d,"float32")),x=vt(h.shape,f);return x.length>0?re(Fe(L,x),h.shape):L},S=()=>{const L=ae(r,Le(h,"float32")),x=vt(d.shape,f);return x.length>0?re(Fe(L,x),d.shape):L};return{a:g,b:S}}};const mE={kernelName:hf,gradFunc:r=>({x:()=>yt(r)})};const fE={kernelName:SR,inputsToSave:["indices"],gradFunc:(r,l)=>{const h=l[0];return{indices:()=>Fs(h.shape,"float32")}}};const gE={kernelName:LR,gradFunc:r=>({x:()=>je(r)})};const ex={kernelName:uf,inputsToSave:["x"],gradFunc:(r,l,h)=>{const d=l[0],{paddings:f}=h,g=f.map(S=>S[0]);return{x:()=>At(r,g,d.shape)}}};const yE={kernelName:df,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(r,l)=>{const[h,d,f]=l,g=h,S=d,L=rt(g.shape,S.shape),x=()=>{const O=Le(S,"float32");let C=ae(r,ae(O,da(g,Be(O,ke(1)))));const $=vt(g.shape,L);return $.length>0&&(C=Fe(C,$)),re(C,g.shape)},A=()=>{const O=wi(g,0),C=zn(O,lo(g),je(g));let $=ae(r,ae(f,C));const z=vt(S.shape,L);return z.length>0&&($=Fe($,z)),re($,S.shape)};return{a:x,b:A}}};const bE={kernelName:IR,inputsToSave:["x","alpha"],gradFunc:(r,l)=>{const[h,d]=l,f=wi(h,0);return{x:()=>zn(f,r,ae(r,d)),alpha:()=>{let g=zn(f,je(r),ae(r,h));const S=vt(d.shape,r.shape);return 
S.length>0&&(g=Fe(g,S)),re(g,d.shape)}}}};const wE={kernelName:AR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,yt(ut(h)))}}};const LE={kernelName:CR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l,d=ae(Ir(h,6),pa(h));return{x:()=>ae(r,Le(d,"float32"))}}};const SE={kernelName:pf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,Le(pa(h),"float32"))}}};const IE={kernelName:mf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>re(r,h.shape)}}};const xE={kernelName:gf,inputsToSave:["images"],gradFunc:(r,l,h)=>{const[d]=l,f=L=>{const{alignCorners:x}=h;return L.resizeBilinearBackprop(r,d,x)},g={images:d},S=()=>H.runKernelFunc(f,g,null,NR,h);return{images:S}}};const TE={kernelName:ff,inputsToSave:["images"],gradFunc:(r,l,h)=>{const[d]=l,f=L=>{const{alignCorners:x}=h;return L.resizeNearestNeighborBackprop(r,d,x)},g={images:d},S=()=>H.runKernelFunc(f,g,null,vR,h);return{images:S}}};const AE={kernelName:yf,gradFunc:(r,l,h)=>{const{dims:d}=h,f=ht(d,r.shape);return{x:()=>Vc(r,f)}}};const vE={kernelName:RR,gradFunc:r=>({x:()=>je(r)})};const NE={kernelName:bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>yt(ze(r,ae(da(h,1.5),2)))}}};const CE={kernelName:wf,inputsToSave:["condition"],gradFunc:(r,l)=>{const[h]=l;return{condition:()=>Le(je(h),"float32"),t:()=>ae(r,Le(h,r.dtype)),e:()=>ae(r,Le(BI(h),r.dtype))}}};const RE={kernelName:OR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>{const d=wi(h,ke(0)),f=ke(i1),g=ke(r1),S=ae(r,g),L=ae(ae(r,f),Gn(Le(h,"float32")));return zn(d,S,L)}}}};const OE={kernelName:xf,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,ae(h,Be(ke(1),h)))}}};const EE={kernelName:ER,gradFunc:r=>({x:()=>je(r)})};const DE={kernelName:Sf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(Fu(Le(h,"float32")),r)}}};const kE={kernelName:If,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(CI(Le(h,"float32")),r)}}};const FE={kernelName:Lf,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{begin:f,size:g}=h,S=d.shape,[L,x]=Vf(d,f,g),A=[];for(let O=0;OGI(r,A)}}};const _E={kernelName:kR,outputsToSave:[!0],gradFunc:(r,l,h)=>{const[d]=l,{dim:f}=h,g=!0,S=ae(r,d);return{logits:()=>Be(S,ae(Fe(S,[f],g),d))}}};const WE={kernelName:DR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,AI(h))}}};const tx={kernelName:vf,gradFunc:(r,l,h)=>{const{blockShape:d,paddings:f}=h;return{x:()=>vI(r,d,f)}}};const nx={kernelName:Nf,gradFunc:(r,l,h)=>{const{axis:d}=h;return{x:()=>bn(r,d)}}};const $E={kernelName:Tf,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,ae(gs(Le(h,"float32")),2))}}};const UE={kernelName:FR,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(r,ae(Le(h,"float32"),2))}}};const BE={kernelName:Cf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=ke(2),g=()=>ae(r,ae(f,Be(h,d))),S=()=>ae(r,ae(f,Be(d,h)));return{a:g,b:S}}};const ME={kernelName:_f,gradFunc:r=>({x:()=>je(r)})};const PE={kernelName:Rf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[h,d]=l,f=rt(h.shape,d.shape),g=()=>{let L=r;const x=vt(h.shape,f);return x.length>0&&(L=Fe(L,x)),re(L,h.shape)},S=()=>{let L=r;const x=vt(d.shape,f);return x.length>0&&(L=Fe(L,x)),re(yt(L),d.shape)};return{a:g,b:S}}};const zE={kernelName:Af,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,f=d.shape.slice(),{axis:g}=h,S=ht(g,d.shape);S.forEach(A=>{f[A]=1});const L=re(r,f),x=ae(L,Ki(d.shape,"float32"));return{x:()=>x}}};const 
GE={kernelName:_R,inputsToSave:["x"],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ze(r,ut(Fu(h)))}}};const VE={kernelName:WR,outputsToSave:[!0],gradFunc:(r,l)=>{const[h]=l;return{x:()=>ae(Be(ke(1),ut(h)),r)}}};const HE={kernelName:Of,inputsToSave:["x"],gradFunc:(r,l,h)=>{const[d]=l,{reps:f}=h,g=()=>{let S=je(d);if(d.rank===1)for(let L=0;L{const d=h,{perm:f}=d,g=Mc(f);return{x:()=>Wt(r,g)}}};const qE={kernelName:Df,gradFunc:(r,l,h)=>{const d=h,{axis:f}=d;return{value:()=>js(r,f)}}};const jE={kernelName:kf,inputsToSave:["segmentIds"],gradFunc:(r,l)=>{const[h]=l,d=()=>CZ(r,h);return{x:d}}};function CZ(r,l){const h=_I(l,je(l)),d=FI(r,h);let f=Sr(l,ke(0,"int32"));const g=d.rank-f.rank;for(let L=0;L({x:()=>je(r)})};const RZ=[o1,a1,c1,l1,h1,u1,d1,p1,m1,f1,g1,y1,w1,S1,I1,x1,T1,A1,v1,N1,C1,O1,R1,D1,k1,F1,_1,W1,$1,U1,B1,M1,P1,z1,V1,G1,H1,j1,K1,X1,J1,Z1,Q1,eE,tE,nE,iE,QI,QI,rE,aE,lE,hE,uE,dE,pE,mE,fE,gE,ex,ex,yE,bE,wE,LE,SE,IE,xE,TE,AE,vE,NE,CE,RE,OE,EE,DE,kE,FE,_E,WE,tx,tx,nx,nx,$E,BE,UE,ME,PE,zE,GE,VE,HE,YE,qE,jE,KE];for(const r of RZ)BR(r);function sx(r,l,h=!1){const{Image:d,Canvas:f}=gt.getEnv();if(!(r instanceof d||r instanceof f))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");const g=ra(r),S=l/Math.max(g.height,g.width),L=S*g.width,x=S*g.height,A=Dc({width:l,height:l}),O=r instanceof f?r:Tu(r),C=Math.abs(L-x)/2,$=h&&L{if(yr(h)){this._imageTensors[d]=h,this._inputDimensions[d]=h.shape;return}if(Os(h)){const g=h.shape[0];if(g!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${g} passed, but not supported in input array`);this._imageTensors[d]=h,this._inputDimensions[d]=h.shape.slice(1);return}const f=h instanceof gt.getEnv().Canvas?h:Tu(h);this._canvases[d]=f,this._inputDimensions[d]=[f.height,f.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return zi(this.batchSize,0,1).map((r,l)=>this.getReshapedInputDimensions(l))}getInput(r){return this.canvases[r]||this.imageTensors[r]}getInputDimensions(r){return this._inputDimensions[r]}getInputHeight(r){return this._inputDimensions[r][0]}getInputWidth(r){return this._inputDimensions[r][1]}getReshapedInputDimensions(r){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");const l=this.getInputWidth(r),h=this.getInputHeight(r);return PS({width:l,height:h},this.inputSize)}toBatchTensor(r,l=!0){return this._inputSize=r,pO(()=>{const h=zi(this.batchSize,0,1).map(f=>{const g=this.getInput(f);if(g instanceof Tn){let S=Os(g)?g:g.expandDims();return S=HS(S,l),(S.shape[1]!==r||S.shape[2]!==r)&&(S=s1.resizeBilinear(S,[r,r])),S.as3D(r,r,3)}if(g instanceof gt.getEnv().Canvas)return II.fromPixels(sx(g,r,l));throw new Error(`toBatchTensor - at batchIdx ${f}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${g}`)}),d=js(h.map(f=>Le(f,"float32"))).as4D(this.batchSize,r,r,3);return d})}}async function Rt(r){if(r instanceof po)return r;let l=Array.isArray(r)?r:[r];if(!l.length)throw new Error("toNetInput - empty array passed as input");const h=f=>Array.isArray(r)?` at input index ${f}:`:"",d=l.map(ia);return d.forEach((f,g)=>{if(!Um(f)&&!yr(f)&&!Os(f))throw typeof l[g]=="string"?new Error(`toNetInput -${h(g)} string passed, but 
could not resolve HTMLElement for element id ${l[g]}`):new Error(`toNetInput -${h(g)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(Os(f)){const S=f.shape[0];if(S!==1)throw new Error(`toNetInput -${h(g)} tf.Tensor4D with batchSize ${S} passed, but not supported in input array`)}}),await Promise.all(d.map(f=>Um(f)&&QS(f))),new po(d,Array.isArray(r))}async function Yc(r,l){const{Canvas:h}=gt.getEnv();let d=r;if(!(r instanceof h)){const S=await Rt(r);if(S.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");const L=S.getInput(0);d=L instanceof h?L:await tI(L)}const f=es(d),g=l.map(S=>S instanceof Yt?S.forSize(d.width,d.height).box.floor():S).map(S=>S.clipAtImageBorders(d.width,d.height));return g.map(({x:S,y:L,width:x,height:A})=>{const O=Dc({width:x,height:A});return es(O).putImageData(f.getImageData(S,L,x,A),0,0),O})}const ig=Ye(Je());async function qc(r,l){if(!yr(r)&&!Os(r))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(Os(r)&&r.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return ig.tidy(()=>{const[h,d,f]=r.shape.slice(Os(r)?1:0),g=l.map(L=>L instanceof Yt?L.forSize(d,h).box:L).map(L=>L.clipAtImageBorders(d,h)),S=g.map(({x:L,y:x,width:A,height:O})=>ig.slice3d(r.as3D(h,d,f),[x,L,0],[O,A,f]));return S})}async function fa(r,l){const h=gt.getEnv().fetch,d=await h(r,l);if(!(d.status<400))throw new Error(`failed to fetch: (${d.status}) ${d.statusText}, from url: ${d.url}`);return d}async function OZ(r){const l=await fa(r),h=await l.blob();if(!h.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${h.type}, for url: ${l.url}`);return eI(h)}async function ix(r){return(await fa(r)).json()}async function EZ(r){return new Float32Array(await(await fa(r)).arrayBuffer())}function rg(r,l){const h=`${l}-weights_manifest.json`;if(!r)return{modelBaseUri:"",manifestUri:h};if(r==="/")return{modelBaseUri:"/",manifestUri:`/${h}`};const d=r.startsWith("http://")?"http://":r.startsWith("https://")?"https://":"";r=r.replace(d,"");const f=r.split("/").filter(L=>L),g=r.endsWith(".json")?f[f.length-1]:h;let S=d+(r.endsWith(".json")?f.slice(0,f.length-1):f).join("/");return S=r.startsWith("/")?`/${S}`:S,{modelBaseUri:S,manifestUri:S==="/"?`/${g}`:`${S}/${g}`}}const XE=Ye(Je());async function rx(r,l){const{manifestUri:h,modelBaseUri:d}=rg(r,l);let f=await ix(h);return XE.io.loadWeights(f,d)}function DZ(r,l,h=!1){const{width:d,height:f}=h?ra(l):l;return r.width=d,r.height=f,{width:d,height:f}}const xr=Ye(Je());class kn{constructor(r){this._name=r;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(r){const{obj:l,objProp:h}=this.traversePropertyPath(r);return l[h]}reassignParamFromPath(r,l){const{obj:h,objProp:d}=this.traversePropertyPath(r);h[d].dispose(),h[d]=l}getParamList(){return this._paramMappings.map(({paramPath:r})=>({path:r,tensor:this.getParamFromPath(r)}))}getTrainableParams(){return this.getParamList().filter(r=>r.tensor instanceof xr.Variable)}getFrozenParams(){return this.getParamList().filter(r=>!(r.tensor instanceof xr.Variable))}variable(){this.getFrozenParams().forEach(({path:r,tensor:l})=>{this.reassignParamFromPath(r,l.variable())})}freeze(){this.getTrainableParams().forEach(({path:r,tensor:l})=>{const 
h=xr.tensor(l.dataSync());l.dispose(),this.reassignParamFromPath(r,h)})}dispose(r=!0){this.getParamList().forEach(l=>{if(r&&l.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${l.path}`);l.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:r})=>Array.from(r.dataSync())).reduce((r,l)=>r.concat(l)))}async load(r){if(r instanceof Float32Array){this.extractWeights(r);return}await this.loadFromUri(r)}async loadFromUri(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);const l=await rx(r,this.getDefaultModelName());this.loadFromWeightMap(l)}async loadFromDisk(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);const{readFile:l}=gt.getEnv(),{manifestUri:h,modelBaseUri:d}=rg(r,this.getDefaultModelName()),f=x=>Promise.all(x.map(A=>l(A).then(O=>O.buffer))),g=xr.io.weightsLoaderFactory(f),S=JSON.parse((await l(h)).toString()),L=await g(S,d);this.loadFromWeightMap(L)}loadFromWeightMap(r){const{paramMappings:l,params:h}=this.extractParamsFromWeigthMap(r);this._paramMappings=l,this._params=h}extractWeights(r){const{paramMappings:l,params:h}=this.extractParams(r);this._paramMappings=l,this._params=h}traversePropertyPath(r){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");const l=r.split("/").reduce((f,g)=>{if(!f.nextObj.hasOwnProperty(g))throw new Error(`traversePropertyPath - object does not have property ${g}, for path ${r}`);return{obj:f.nextObj,objProp:g,nextObj:f.nextObj[g]}},{nextObj:this.params}),{obj:h,objProp:d}=l;if(!h||!d||!(h[d]instanceof xr.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${r}`);return{obj:h,objProp:d}}}const jc=Ye(Je());function ns(r,l,h){return jc.tidy(()=>{let d=jc.separableConv2d(r,l.depthwise_filter,l.pointwise_filter,h,"same");return d=jc.add(d,l.bias),d})}const Dt=Ye(Je());function og(r,l,h=!1){return Dt.tidy(()=>{const d=Dt.relu(h?Dt.add(Dt.conv2d(r,l.conv0.filters,[2,2],"same"),l.conv0.bias):ns(r,l.conv0,[2,2])),f=ns(d,l.conv1,[1,1]),g=Dt.relu(Dt.add(d,f)),S=ns(g,l.conv2,[1,1]);return Dt.relu(Dt.add(d,Dt.add(f,S)))})}function Mu(r,l,h=!1,d=!0){return Dt.tidy(()=>{const f=Dt.relu(h?Dt.add(Dt.conv2d(r,l.conv0.filters,d?[2,2]:[1,1],"same"),l.conv0.bias):ns(r,l.conv0,d?[2,2]:[1,1])),g=ns(f,l.conv1,[1,1]),S=Dt.relu(Dt.add(f,g)),L=ns(S,l.conv2,[1,1]),x=Dt.relu(Dt.add(f,Dt.add(g,L))),A=ns(x,l.conv3,[1,1]);return Dt.relu(Dt.add(f,Dt.add(g,Dt.add(L,A))))})}const mo=Ye(Je());function ga(r,l,h="same",d=!1){return mo.tidy(()=>{const f=mo.add(mo.conv2d(r,l.filters,[1,1],h),l.bias);return d?mo.relu(f):f})}function Vn(r,l){Object.keys(r).forEach(h=>{l.some(d=>d.originalPath===h)||r[h].dispose()})}const ag=Ye(Je());function Kc(r,l){return function(h,d,f,g){const S=ag.tensor4d(r(h*d*f*f),[f,f,h,d]),L=ag.tensor1d(r(d));return l.push({paramPath:`${g}/filters`},{paramPath:`${g}/bias`}),{filters:S,bias:L}}}const cg=Ye(Je());function lg(r,l){return function(h,d,f){const g=cg.tensor2d(r(h*d),[h,d]),S=cg.tensor1d(r(d));return l.push({paramPath:`${f}/weights`},{paramPath:`${f}/bias`}),{weights:g,bias:S}}}class ox{constructor(r,l,h){this.depthwise_filter=r;this.pointwise_filter=l;this.bias=h}}const Pu=Ye(Je());function Xc(r,l){return function(h,d,f){const g=Pu.tensor4d(r(3*3*h),[3,3,h,1]),S=Pu.tensor4d(r(h*d),[1,1,h,d]),L=Pu.tensor1d(r(d));return 
l.push({paramPath:`${f}/depthwise_filter`},{paramPath:`${f}/pointwise_filter`},{paramPath:`${f}/bias`}),new ox(g,S,L)}}function Jc(r){return function(l){const h=r(`${l}/depthwise_filter`,4),d=r(`${l}/pointwise_filter`,4),f=r(`${l}/bias`,1);return new ox(h,d,f)}}function ys(r,l){return function(h,d,f){const g=r[h];if(!Qo(g,d))throw new Error(`expected weightMap[${h}] to be a Tensor${d}D, instead have ${g}`);return l.push({originalPath:h,paramPath:f||h}),g}}function Hn(r){let l=r;function h(f){const g=l.slice(0,f);return l=l.slice(f),g}function d(){return l}return{extractWeights:h,getRemainingWeights:d}}function hg(r,l){const h=Kc(r,l),d=Xc(r,l);function f(S,L,x,A=!1){const O=A?h(S,L,3,`${x}/conv0`):d(S,L,`${x}/conv0`),C=d(L,L,`${x}/conv1`),$=d(L,L,`${x}/conv2`);return{conv0:O,conv1:C,conv2:$}}function g(S,L,x,A=!1){const{conv0:O,conv1:C,conv2:$}=f(S,L,x,A),z=d(L,L,`${x}/conv3`);return{conv0:O,conv1:C,conv2:$,conv3:z}}return{extractDenseBlock3Params:f,extractDenseBlock4Params:g}}function JE(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractDenseBlock4Params:f}=hg(h,l),g=f(3,32,"dense0",!0),S=f(32,64,"dense1"),L=f(64,128,"dense2"),x=f(128,256,"dense3");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{dense0:g,dense1:S,dense2:L,dense3:x}}}function ug(r){return function(l){const h=r(`${l}/filters`,4),d=r(`${l}/bias`,1);return{filters:h,bias:d}}}function dg(r,l){const h=ys(r,l),d=ug(h),f=Jc(h);function g(L,x=!1){const A=x?d(`${L}/conv0`):f(`${L}/conv0`),O=f(`${L}/conv1`),C=f(`${L}/conv2`);return{conv0:A,conv1:O,conv2:C}}function S(L,x=!1){const A=x?d(`${L}/conv0`):f(`${L}/conv0`),O=f(`${L}/conv1`),C=f(`${L}/conv2`),$=f(`${L}/conv3`);return{conv0:A,conv1:O,conv2:C,conv3:$}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:S}}function ZE(r){const l=[],{extractDenseBlock4Params:h}=dg(r,l),d={dense0:h("dense0",!0),dense1:h("dense1"),dense2:h("dense2"),dense3:h("dense3")};return Vn(r,l),{params:d,paramMappings:l}}const fo=Ye(Je());class pg extends kn{constructor(){super("FaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceFeatureExtractor - load model before inference");return fo.tidy(()=>{const h=fo.cast(r.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],f=yi(h,d).div(fo.scalar(255));let g=Mu(f,l.dense0,!0);return g=Mu(g,l.dense1),g=Mu(g,l.dense2),g=Mu(g,l.dense3),g=fo.avgPool(g,[7,7],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(r){return ZE(r)}extractParams(r){return JE(r)}}const Zc=Ye(Je());function zu(r,l){return Zc.tidy(()=>Zc.add(Zc.matMul(r,l.weights),l.bias))}function QE(r,l,h){const d=[],{extractWeights:f,getRemainingWeights:g}=Hn(r),S=lg(f,d),L=S(l,h,"fc");if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:d,params:{fc:L}}}function eD(r){const l=[],h=ys(r,l);function d(g){const S=h(`${g}/weights`,2),L=h(`${g}/bias`,1);return{weights:S,bias:L}}const f={fc:d("fc")};return Vn(r,l),{params:f,paramMappings:l}}function mg(r){const l={},h={};return Object.keys(r).forEach(d=>{const f=d.startsWith("fc")?h:l;f[d]=r[d]}),{featureExtractorMap:l,classifierMap:h}}const tD=Ye(Je());class fg extends kn{constructor(r,l){super(r);this._faceFeatureExtractor=l}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before 
inference`);return tD.tidy(()=>{const h=r instanceof po?this.faceFeatureExtractor.forwardInput(r):r;return zu(h.as2D(h.shape[0],-1),l.fc)})}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:h}=this.extractClassifierParams(r);this._params=l,this._paramMappings=h}extractClassifierParams(r){return QE(r,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:h}=mg(r);return this.faceFeatureExtractor.loadFromWeightMap(l),eD(h)}extractParams(r){const l=this.getClassifierChannelsIn(),h=this.getClassifierChannelsOut(),d=h*l+h,f=r.slice(0,r.length-d),g=r.slice(r.length-d);return this.faceFeatureExtractor.extractWeights(f),this.extractClassifierParams(g)}}const ax=["neutral","happy","sad","angry","fearful","disgusted","surprised"];class ya{constructor(r){if(r.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${r.length}`);ax.forEach((l,h)=>{this[l]=r[h]})}asSortedArray(){return ax.map(r=>({expression:r,probability:this[r]})).sort((r,l)=>l.probability-r.probability)}}const Qc=Ye(Je());class cx extends fg{constructor(r=new pg){super("FaceExpressionNet",r)}forwardInput(r){return Qc.tidy(()=>Qc.softmax(this.runNet(r)))}async forward(r){return this.forwardInput(await Rt(r))}async predictExpressions(r){const l=await Rt(r),h=await this.forwardInput(l),d=await Promise.all(Qc.unstack(h).map(async g=>{const S=await g.data();return g.dispose(),S}));h.dispose();const f=d.map(g=>new ya(g));return l.isBatchInput?f:f[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}}function lx(r){return r.expressions instanceof ya}function gg(r,l){const h={expressions:l};return Object.assign({},r,h)}function kZ(r,l,h=.1,d){const f=Array.isArray(l)?l:[l];f.forEach(g=>{const S=g instanceof ya?g:lx(g)?g.expressions:void 0;if(!S)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");const L=S.asSortedArray(),x=L.filter(C=>C.probability>h),A=Vi(g)?g.detection.box.bottomLeft:d||new Ze(0,0),O=new Ec(x.map(C=>`${C.expression} (${ea(C.probability)})`),A);O.draw(r)})}function ba(r){return Vi(r)&&r.landmarks instanceof qs&&r.unshiftedLandmarks instanceof qs&&r.alignedRect instanceof Yt}function el(r,l){const{box:h}=r.detection,d=l.shiftBy(h.x,h.y),f=d.align(),{imageDims:g}=r.detection,S=new Yt(r.detection.score,f.rescale(g.reverse()),g),L={landmarks:d,unshiftedLandmarks:l,alignedRect:S};return Object.assign({},r,L)}class nD{constructor(r={}){const{drawLines:l=!0,drawPoints:h=!0,lineWidth:d,lineColor:f,pointSize:g,pointColor:S}=r;this.drawLines=l,this.drawPoints=h,this.lineWidth=d||1,this.pointSize=g||2,this.lineColor=f||"rgba(0, 255, 255, 1)",this.pointColor=S||"rgba(255, 0, 255, 1)"}}class sD{constructor(r,l={}){this.faceLandmarks=r,this.options=new nD(l)}draw(r){const l=es(r),{drawLines:h,drawPoints:d,lineWidth:f,lineColor:g,pointSize:S,pointColor:L}=this.options;if(h&&this.faceLandmarks instanceof Iu&&(l.strokeStyle=g,l.lineWidth=f,gr(l,this.faceLandmarks.getJawOutline()),gr(l,this.faceLandmarks.getLeftEyeBrow()),gr(l,this.faceLandmarks.getRightEyeBrow()),gr(l,this.faceLandmarks.getNose()),gr(l,this.faceLandmarks.getLeftEye(),!0),gr(l,this.faceLandmarks.getRightEye(),!0),gr(l,this.faceLandmarks.getMouth(),!0)),d){l.strokeStyle=L,l.fillStyle=L;const 
x=A=>{l.beginPath(),l.arc(A.x,A.y,S,0,2*Math.PI),l.fill()};this.faceLandmarks.positions.forEach(x)}}}function FZ(r,l){const h=Array.isArray(l)?l:[l];h.forEach(d=>{const f=d instanceof qs?d:ba(d)?d.landmarks:void 0;if(!f)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new sD(f).draw(r)})}const hx={};vc(hx,{AnchorPosition:()=>Hi,DrawBox:()=>ZS,DrawBoxOptions:()=>l2,DrawFaceLandmarks:()=>sD,DrawFaceLandmarksOptions:()=>nD,DrawTextField:()=>Ec,DrawTextFieldOptions:()=>Wm,drawContour:()=>gr,drawDetections:()=>GX,drawFaceExpressions:()=>kZ,drawFaceLandmarks:()=>FZ});function _Z(r,l){const h=Kc(r,l),d=Xc(r,l);function f(S,L,x){const A=d(S,L,`${x}/separable_conv0`),O=d(L,L,`${x}/separable_conv1`),C=h(S,L,1,`${x}/expansion_conv`);return{separable_conv0:A,separable_conv1:O,expansion_conv:C}}function g(S,L){const x=d(S,S,`${L}/separable_conv0`),A=d(S,S,`${L}/separable_conv1`),O=d(S,S,`${L}/separable_conv2`);return{separable_conv0:x,separable_conv1:A,separable_conv2:O}}return{extractConvParams:h,extractSeparableConvParams:d,extractReductionBlockParams:f,extractMainBlockParams:g}}function iD(r,l){const h=[],{extractWeights:d,getRemainingWeights:f}=Hn(r),{extractConvParams:g,extractSeparableConvParams:S,extractReductionBlockParams:L,extractMainBlockParams:x}=_Z(d,h),A=g(3,32,3,"entry_flow/conv_in"),O=L(32,64,"entry_flow/reduction_block_0"),C=L(64,128,"entry_flow/reduction_block_1"),$={conv_in:A,reduction_block_0:O,reduction_block_1:C},z={};zi(l,0,1).forEach(fe=>{z[`main_block_${fe}`]=x(128,`middle_flow/main_block_${fe}`)});const ne=L(128,256,"exit_flow/reduction_block"),te=S(256,512,"exit_flow/separable_conv"),se={reduction_block:ne,separable_conv:te};if(f().length!==0)throw new Error(`weights remaing after extract: ${f().length}`);return{paramMappings:h,params:{entry_flow:$,middle_flow:z,exit_flow:se}}}function WZ(r,l){const h=ys(r,l),d=ug(h),f=Jc(h);function g(L){const x=f(`${L}/separable_conv0`),A=f(`${L}/separable_conv1`),O=d(`${L}/expansion_conv`);return{separable_conv0:x,separable_conv1:A,expansion_conv:O}}function S(L){const x=f(`${L}/separable_conv0`),A=f(`${L}/separable_conv1`),O=f(`${L}/separable_conv2`);return{separable_conv0:x,separable_conv1:A,separable_conv2:O}}return{extractConvParams:d,extractSeparableConvParams:f,extractReductionBlockParams:g,extractMainBlockParams:S}}function rD(r,l){const h=[],{extractConvParams:d,extractSeparableConvParams:f,extractReductionBlockParams:g,extractMainBlockParams:S}=WZ(r,h),L=d("entry_flow/conv_in"),x=g("entry_flow/reduction_block_0"),A=g("entry_flow/reduction_block_1"),O={conv_in:L,reduction_block_0:x,reduction_block_1:A},C={};zi(l,0,1).forEach(te=>{C[`main_block_${te}`]=S(`middle_flow/main_block_${te}`)});const $=g("exit_flow/reduction_block"),z=f("exit_flow/separable_conv"),ne={reduction_block:$,separable_conv:z};return Vn(r,h),{params:{entry_flow:O,middle_flow:C,exit_flow:ne},paramMappings:h}}const tn=Ye(Je());function oD(r,l,h){return tn.add(tn.conv2d(r,l.filters,h,"same"),l.bias)}function ux(r,l,h=!0){let d=h?tn.relu(r):r;return d=ns(d,l.separable_conv0,[1,1]),d=ns(tn.relu(d),l.separable_conv1,[1,1]),d=tn.maxPool(d,[3,3],[2,2],"same"),d=tn.add(d,oD(r,l.expansion_conv,[2,2])),d}function $Z(r,l){let h=ns(tn.relu(r),l.separable_conv0,[1,1]);return h=ns(tn.relu(h),l.separable_conv1,[1,1]),h=ns(tn.relu(h),l.separable_conv2,[1,1]),h=tn.add(h,r),h}class aD extends kn{constructor(r){super("TinyXception");this._numMainBlocks=r}forwardInput(r){const{params:l}=this;if(!l)throw new 
Error("TinyXception - load model before inference");return tn.tidy(()=>{const h=tn.cast(r.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],f=yi(h,d).div(tn.scalar(256));let g=tn.relu(oD(f,l.entry_flow.conv_in,[2,2]));return g=ux(g,l.entry_flow.reduction_block_0,!1),g=ux(g,l.entry_flow.reduction_block_1),zi(this._numMainBlocks,0,1).forEach(S=>{g=$Z(g,l.middle_flow[`main_block_${S}`])}),g=ux(g,l.exit_flow.reduction_block),g=tn.relu(ns(g,l.exit_flow.separable_conv,[1,1])),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(r){return rD(r,this._numMainBlocks)}extractParams(r){return iD(r,this._numMainBlocks)}}function cD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),f=lg(h,l),g=f(512,1,"fc/age"),S=f(512,2,"fc/gender");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{fc:{age:g,gender:S}}}}function lD(r){const l=[],h=ys(r,l);function d(g){const S=h(`${g}/weights`,2),L=h(`${g}/bias`,1);return{weights:S,bias:L}}const f={fc:{age:d("fc/age"),gender:d("fc/gender")}};return Vn(r,l),{params:f,paramMappings:l}}var Tr;(function(r){r.FEMALE="female",r.MALE="male"})(Tr||(Tr={}));const Xi=Ye(Je());class dx extends kn{constructor(r=new aD(2)){super("AgeGenderNet");this._faceFeatureExtractor=r}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return Xi.tidy(()=>{const h=r instanceof po?this.faceFeatureExtractor.forwardInput(r):r,d=Xi.avgPool(h,[7,7],[2,2],"valid").as2D(h.shape[0],-1),f=zu(d,l.fc.age).as1D(),g=zu(d,l.fc.gender);return{age:f,gender:g}})}forwardInput(r){return Xi.tidy(()=>{const{age:l,gender:h}=this.runNet(r);return{age:l,gender:Xi.softmax(h)}})}async forward(r){return this.forwardInput(await Rt(r))}async predictAgeAndGender(r){const l=await Rt(r),h=await this.forwardInput(l),d=Xi.unstack(h.age),f=Xi.unstack(h.gender),g=d.map((L,x)=>({ageTensor:L,genderTensor:f[x]})),S=await Promise.all(g.map(async({ageTensor:L,genderTensor:x})=>{const A=(await L.data())[0],O=(await x.data())[0],C=O>.5,$=C?Tr.MALE:Tr.FEMALE,z=C?O:1-O;return L.dispose(),x.dispose(),{age:A,gender:$,genderProbability:z}}));return h.age.dispose(),h.gender.dispose(),l.isBatchInput?S:S[0]}getDefaultModelName(){return"age_gender_model"}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:h}=this.extractClassifierParams(r);this._params=l,this._paramMappings=h}extractClassifierParams(r){return cD(r)}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:h}=mg(r);return this.faceFeatureExtractor.loadFromWeightMap(l),lD(h)}extractParams(r){const l=512*1+1+(512*2+2),h=r.slice(0,r.length-l),d=r.slice(r.length-l);return this.faceFeatureExtractor.extractWeights(h),this.extractClassifierParams(d)}}const bs=Ye(Je());class yg extends fg{postProcess(r,l,h){const d=h.map(({width:g,height:S})=>{const L=l/Math.max(S,g);return{width:g*L,height:S*L}}),f=d.length;return bs.tidy(()=>{const g=(O,C)=>bs.stack([bs.fill([68],O,"float32"),bs.fill([68],C,"float32")],1).as2D(1,136).as1D(),S=(O,C)=>{const{width:$,height:z}=d[O];return C($,z)?Math.abs($-z)/2:0},L=O=>S(O,(C,$)=>C<$),x=O=>S(O,(C,$)=>$g(L(C),x(C))))).div(bs.stack(Array.from(Array(f),(O,C)=>g(d[C].width,d[C].height))));return A})}forwardInput(r){return bs.tidy(()=>{const l=this.runNet(r);return 
this.postProcess(l,r.inputSize,r.inputDimensions.map(([h,d])=>({height:h,width:d})))})}async forward(r){return this.forwardInput(await Rt(r))}async detectLandmarks(r){const l=await Rt(r),h=bs.tidy(()=>bs.unstack(this.forwardInput(l))),d=await Promise.all(h.map(async(f,g)=>{const S=Array.from(await f.data()),L=S.filter((A,O)=>Em(O)),x=S.filter((A,O)=>!Em(O));return new Iu(Array(68).fill(0).map((A,O)=>new Ze(L[O],x[O])),{height:l.getInputHeight(g),width:l.getInputWidth(g)})}));return h.forEach(f=>f.dispose()),l.isBatchInput?d:d[0]}getClassifierChannelsOut(){return 136}}class Gu extends yg{constructor(r=new pg){super("FaceLandmark68Net",r)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}}function hD(r){const l=[],{extractDenseBlock3Params:h}=dg(r,l),d={dense0:h("dense0",!0),dense1:h("dense1"),dense2:h("dense2")};return Vn(r,l),{params:d,paramMappings:l}}function uD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractDenseBlock3Params:f}=hg(h,l),g=f(3,32,"dense0",!0),S=f(32,64,"dense1"),L=f(64,128,"dense2");if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:l,params:{dense0:g,dense1:S,dense2:L}}}const go=Ye(Je());class dD extends kn{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyFaceFeatureExtractor - load model before inference");return go.tidy(()=>{const h=go.cast(r.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],f=yi(h,d).div(go.scalar(255));let g=og(f,l.dense0,!0);return g=og(g,l.dense1),g=og(g,l.dense2),g=go.avgPool(g,[14,14],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(r){return hD(r)}extractParams(r){return uD(r)}}class px extends yg{constructor(r=new dD){super("FaceLandmark68TinyNet",r)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}}class UZ extends Gu{}const bg=Ye(Je());function pD(r,l){return bg.add(bg.mul(r,l.weights),l.biases)}const tl=Ye(Je());function mx(r,l,h,d,f="same"){const{filters:g,bias:S}=l.conv;let L=tl.conv2d(r,g,h,f);return L=tl.add(L,S),L=pD(L,l.scale),d?tl.relu(L):L}function mD(r,l){return mx(r,l,[1,1],!0)}function fx(r,l){return mx(r,l,[1,1],!1)}function wg(r,l){return mx(r,l,[2,2],!0,"valid")}const ws=Ye(Je());function BZ(r,l){function h(L,x,A){const O=r(L),C=O.length/(x*A*A);if(MS(C))throw new Error(`depth has to be an integer: ${C}, weights.length: ${O.length}, numFilters: ${x}, filterSize: ${A}`);return ws.tidy(()=>ws.transpose(ws.tensor4d(O,[x,C,A,A]),[2,3,1,0]))}function d(L,x,A,O){const C=h(L,x,A),$=ws.tensor1d(r(x));return l.push({paramPath:`${O}/filters`},{paramPath:`${O}/bias`}),{filters:C,bias:$}}function f(L,x){const A=ws.tensor1d(r(L)),O=ws.tensor1d(r(L));return l.push({paramPath:`${x}/weights`},{paramPath:`${x}/biases`}),{weights:A,biases:O}}function g(L,x,A,O){const C=d(L,x,A,`${O}/conv`),$=f(x,`${O}/scale`);return{conv:C,scale:$}}function S(L,x,A,O,C=!1){const $=g((C?.5:1)*L,x,A,`${O}/conv1`),z=g(L,x,A,`${O}/conv2`);return{conv1:$,conv2:z}}return{extractConvLayerParams:g,extractResidualLayerParams:S}}function 
fD(r){const{extractWeights:l,getRemainingWeights:h}=Hn(r),d=[],{extractConvLayerParams:f,extractResidualLayerParams:g}=BZ(l,d),S=f(4704,32,7,"conv32_down"),L=g(9216,32,3,"conv32_1"),x=g(9216,32,3,"conv32_2"),A=g(9216,32,3,"conv32_3"),O=g(36864,64,3,"conv64_down",!0),C=g(36864,64,3,"conv64_1"),$=g(36864,64,3,"conv64_2"),z=g(36864,64,3,"conv64_3"),ne=g(147456,128,3,"conv128_down",!0),te=g(147456,128,3,"conv128_1"),se=g(147456,128,3,"conv128_2"),fe=g(589824,256,3,"conv256_down",!0),de=g(589824,256,3,"conv256_1"),Ae=g(589824,256,3,"conv256_2"),xe=g(589824,256,3,"conv256_down_out"),Me=ws.tidy(()=>ws.transpose(ws.tensor2d(l(256*128),[128,256]),[1,0]));if(d.push({paramPath:"fc"}),h().length!==0)throw new Error(`weights remaing after extract: ${h().length}`);const Ke={conv32_down:S,conv32_1:L,conv32_2:x,conv32_3:A,conv64_down:O,conv64_1:C,conv64_2:$,conv64_3:z,conv128_down:ne,conv128_1:te,conv128_2:se,conv256_down:fe,conv256_1:de,conv256_2:Ae,conv256_down_out:xe,fc:Me};return{params:Ke,paramMappings:d}}function MZ(r,l){const h=ys(r,l);function d(S){const L=h(`${S}/scale/weights`,1),x=h(`${S}/scale/biases`,1);return{weights:L,biases:x}}function f(S){const L=h(`${S}/conv/filters`,4),x=h(`${S}/conv/bias`,1),A=d(S);return{conv:{filters:L,bias:x},scale:A}}function g(S){return{conv1:f(`${S}/conv1`),conv2:f(`${S}/conv2`)}}return{extractConvLayerParams:f,extractResidualLayerParams:g}}function gD(r){const l=[],{extractConvLayerParams:h,extractResidualLayerParams:d}=MZ(r,l),f=h("conv32_down"),g=d("conv32_1"),S=d("conv32_2"),L=d("conv32_3"),x=d("conv64_down"),A=d("conv64_1"),O=d("conv64_2"),C=d("conv64_3"),$=d("conv128_down"),z=d("conv128_1"),ne=d("conv128_2"),te=d("conv256_down"),se=d("conv256_1"),fe=d("conv256_2"),de=d("conv256_down_out"),Ae=r.fc;if(l.push({originalPath:"fc",paramPath:"fc"}),!BS(Ae))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${Ae}`);const xe={conv32_down:f,conv32_1:g,conv32_2:S,conv32_3:L,conv64_down:x,conv64_1:A,conv64_2:O,conv64_3:C,conv128_down:$,conv128_1:z,conv128_2:ne,conv256_down:te,conv256_1:se,conv256_2:fe,conv256_down_out:de,fc:Ae};return Vn(r,l),{params:xe,paramMappings:l}}const Yn=Ye(Je());function Li(r,l){let h=mD(r,l.conv1);return h=fx(h,l.conv2),h=Yn.add(h,r),h=Yn.relu(h),h}function Vu(r,l){let h=wg(r,l.conv1);h=fx(h,l.conv2);let d=Yn.avgPool(r,2,2,"valid");const f=Yn.zeros(d.shape),g=d.shape[3]!==h.shape[3],S=d.shape[1]!==h.shape[1]||d.shape[2]!==h.shape[2];if(S){const L=[...h.shape];L[1]=1;const x=Yn.zeros(L);h=Yn.concat([h,x],1);const A=[...h.shape];A[2]=1;const O=Yn.zeros(A);h=Yn.concat([h,O],2)}return d=g?Yn.concat([d,f],3):d,h=Yn.add(d,h),h=Yn.relu(h),h}const _s=Ye(Je());class Hu extends kn{constructor(){super("FaceRecognitionNet")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceRecognitionNet - load model before inference");return _s.tidy(()=>{const h=_s.cast(r.toBatchTensor(150,!0),"float32"),d=[122.782,117.001,104.298],f=yi(h,d).div(_s.scalar(256));let g=wg(f,l.conv32_down);g=_s.maxPool(g,3,2,"valid"),g=Li(g,l.conv32_1),g=Li(g,l.conv32_2),g=Li(g,l.conv32_3),g=Vu(g,l.conv64_down),g=Li(g,l.conv64_1),g=Li(g,l.conv64_2),g=Li(g,l.conv64_3),g=Vu(g,l.conv128_down),g=Li(g,l.conv128_1),g=Li(g,l.conv128_2),g=Vu(g,l.conv256_down),g=Li(g,l.conv256_1),g=Li(g,l.conv256_2),g=Vu(g,l.conv256_down_out);const S=g.mean([1,2]),L=_s.matMul(S,l.fc);return L})}async forward(r){return this.forwardInput(await Rt(r))}async computeFaceDescriptor(r){const l=await Rt(r),h=_s.tidy(()=>_s.unstack(this.forwardInput(l))),d=await 
Promise.all(h.map(f=>f.data()));return h.forEach(f=>f.dispose()),l.isBatchInput?d:d[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(r){return gD(r)}extractParams(r){return fD(r)}}function PZ(r){const l=new Hu;return l.extractWeights(r),l}function Lg(r,l){const h={descriptor:l};return Object.assign({},r,h)}function zZ(r){return typeof r.age=="number"}function Sg(r,l){const h={age:l};return Object.assign({},r,h)}function GZ(r){return(r.gender===Tr.MALE||r.gender===Tr.FEMALE)&&Rc(r.genderProbability)}function Ig(r,l,h){const d={gender:l,genderProbability:h};return Object.assign({},r,d)}const Si=Ye(Je());function VZ(r,l){function h(x,A){const O=Si.tensor4d(r(3*3*x),[3,3,x,1]),C=Si.tensor1d(r(x)),$=Si.tensor1d(r(x)),z=Si.tensor1d(r(x)),ne=Si.tensor1d(r(x));return l.push({paramPath:`${A}/filters`},{paramPath:`${A}/batch_norm_scale`},{paramPath:`${A}/batch_norm_offset`},{paramPath:`${A}/batch_norm_mean`},{paramPath:`${A}/batch_norm_variance`}),{filters:O,batch_norm_scale:C,batch_norm_offset:$,batch_norm_mean:z,batch_norm_variance:ne}}function d(x,A,O,C,$){const z=Si.tensor4d(r(x*A*O*O),[O,O,x,A]),ne=Si.tensor1d(r(A));return l.push({paramPath:`${C}/filters`},{paramPath:`${C}/${$?"batch_norm_offset":"bias"}`}),{filters:z,bias:ne}}function f(x,A,O,C){const{filters:$,bias:z}=d(x,A,O,C,!0);return{filters:$,batch_norm_offset:z}}function g(x,A,O){const C=h(x,`${O}/depthwise_conv`),$=f(x,A,1,`${O}/pointwise_conv`);return{depthwise_conv:C,pointwise_conv:$}}function S(){const x=f(3,32,3,"mobilenetv1/conv_0"),A=g(32,64,"mobilenetv1/conv_1"),O=g(64,128,"mobilenetv1/conv_2"),C=g(128,128,"mobilenetv1/conv_3"),$=g(128,256,"mobilenetv1/conv_4"),z=g(256,256,"mobilenetv1/conv_5"),ne=g(256,512,"mobilenetv1/conv_6"),te=g(512,512,"mobilenetv1/conv_7"),se=g(512,512,"mobilenetv1/conv_8"),fe=g(512,512,"mobilenetv1/conv_9"),de=g(512,512,"mobilenetv1/conv_10"),Ae=g(512,512,"mobilenetv1/conv_11"),xe=g(512,1024,"mobilenetv1/conv_12"),Me=g(1024,1024,"mobilenetv1/conv_13");return{conv_0:x,conv_1:A,conv_2:O,conv_3:C,conv_4:$,conv_5:z,conv_6:ne,conv_7:te,conv_8:se,conv_9:fe,conv_10:de,conv_11:Ae,conv_12:xe,conv_13:Me}}function L(){const 
x=f(1024,256,1,"prediction_layer/conv_0"),A=f(256,512,3,"prediction_layer/conv_1"),O=f(512,128,1,"prediction_layer/conv_2"),C=f(128,256,3,"prediction_layer/conv_3"),$=f(256,128,1,"prediction_layer/conv_4"),z=f(128,256,3,"prediction_layer/conv_5"),ne=f(256,64,1,"prediction_layer/conv_6"),te=f(64,128,3,"prediction_layer/conv_7"),se=d(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),fe=d(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),de=d(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),Ae=d(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),xe=d(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Me=d(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),Ke=d(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),wt=d(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),$t=d(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Kt=d(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),Fn=d(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),vn=d(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),Nn={box_encoding_predictor:se,class_predictor:fe},Qs={box_encoding_predictor:de,class_predictor:Ae},Ai={box_encoding_predictor:xe,class_predictor:Me},ei={box_encoding_predictor:Ke,class_predictor:wt},xa={box_encoding_predictor:$t,class_predictor:Kt},hl={box_encoding_predictor:Fn,class_predictor:vn};return{conv_0:x,conv_1:A,conv_2:O,conv_3:C,conv_4:$,conv_5:z,conv_6:ne,conv_7:te,box_predictor_0:Nn,box_predictor_1:Qs,box_predictor_2:Ai,box_predictor_3:ei,box_predictor_4:xa,box_predictor_5:hl}}return{extractMobilenetV1Params:S,extractPredictionLayerParams:L}}function yD(r){const l=[],{extractWeights:h,getRemainingWeights:d}=Hn(r),{extractMobilenetV1Params:f,extractPredictionLayerParams:g}=VZ(h,l),S=f(),L=g(),x=Si.tensor3d(h(5118*4),[1,5118,4]),A={extra_dim:x};if(l.push({paramPath:"output_layer/extra_dim"}),d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{params:{mobilenetv1:S,prediction_layer:L,output_layer:A},paramMappings:l}}function HZ(r,l){const h=ys(r,l);function d(A,O,C){const $=h(`${A}/Conv2d_${O}_pointwise/weights`,4,`${C}/filters`),z=h(`${A}/Conv2d_${O}_pointwise/convolution_bn_offset`,1,`${C}/batch_norm_offset`);return{filters:$,batch_norm_offset:z}}function f(A){const O=`mobilenetv1/conv_${A}`,C=`MobilenetV1/Conv2d_${A}_depthwise`,$=`${O}/depthwise_conv`,z=`${O}/pointwise_conv`,ne=h(`${C}/depthwise_weights`,4,`${$}/filters`),te=h(`${C}/BatchNorm/gamma`,1,`${$}/batch_norm_scale`),se=h(`${C}/BatchNorm/beta`,1,`${$}/batch_norm_offset`),fe=h(`${C}/BatchNorm/moving_mean`,1,`${$}/batch_norm_mean`),de=h(`${C}/BatchNorm/moving_variance`,1,`${$}/batch_norm_variance`);return{depthwise_conv:{filters:ne,batch_norm_scale:te,batch_norm_offset:se,batch_norm_mean:fe,batch_norm_variance:de},pointwise_conv:d("MobilenetV1",A,z)}}function g(){return{conv_0:d("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:f(1),conv_2:f(2),conv_3:f(3),conv_4:f(4),conv_5:f(5),conv_6:f(6),conv_7:f(7),conv_8:f(8),conv_9:f(9),conv_10:f(10),conv_11:f(11),conv_12:f(12),conv_13:f(13)}}function S(A,O){const C=h(`${A}/weights`,4,`${O}/filters`),$=h(`${A}/biases`,1,`${O}/bias`);return{filters:C,bias:$}}function L(A){const 
O=S(`Prediction/BoxPredictor_${A}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${A}/box_encoding_predictor`),C=S(`Prediction/BoxPredictor_${A}/ClassPredictor`,`prediction_layer/box_predictor_${A}/class_predictor`);return{box_encoding_predictor:O,class_predictor:C}}function x(){return{conv_0:d("Prediction",0,"prediction_layer/conv_0"),conv_1:d("Prediction",1,"prediction_layer/conv_1"),conv_2:d("Prediction",2,"prediction_layer/conv_2"),conv_3:d("Prediction",3,"prediction_layer/conv_3"),conv_4:d("Prediction",4,"prediction_layer/conv_4"),conv_5:d("Prediction",5,"prediction_layer/conv_5"),conv_6:d("Prediction",6,"prediction_layer/conv_6"),conv_7:d("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:L(0),box_predictor_1:L(1),box_predictor_2:L(2),box_predictor_3:L(3),box_predictor_4:L(4),box_predictor_5:L(5)}}return{extractMobilenetV1Params:g,extractPredictionLayerParams:x}}function bD(r){const l=[],{extractMobilenetV1Params:h,extractPredictionLayerParams:d}=HZ(r,l),f=r["Output/extra_dim"];if(l.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!yr(f))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${f}`);const g={mobilenetv1:h(),prediction_layer:d(),output_layer:{extra_dim:f}};return Vn(r,l),{params:g,paramMappings:l}}const yo=Ye(Je());function Xs(r,l,h){return yo.tidy(()=>{let d=yo.conv2d(r,l.filters,h,"same");return d=yo.add(d,l.batch_norm_offset),yo.clipByValue(d,0,6)})}const Ar=Ye(Je()),YZ=.0010000000474974513;function qZ(r,l,h){return Ar.tidy(()=>{let d=Ar.depthwiseConv2d(r,l.filters,h,"same");return d=Ar.batchNorm(d,l.batch_norm_mean,l.batch_norm_variance,l.batch_norm_offset,l.batch_norm_scale,YZ),Ar.clipByValue(d,0,6)})}function jZ(r){return[2,4,6,12].some(l=>l===r)?[2,2]:[1,1]}function wD(r,l){return Ar.tidy(()=>{let h,d=Xs(r,l.conv_0,[2,2]);const f=[l.conv_1,l.conv_2,l.conv_3,l.conv_4,l.conv_5,l.conv_6,l.conv_7,l.conv_8,l.conv_9,l.conv_10,l.conv_11,l.conv_12,l.conv_13];if(f.forEach((g,S)=>{const L=S+1,x=jZ(L);d=qZ(d,g.depthwise_conv,x),d=Xs(d,g.pointwise_conv,[1,1]),L===11&&(h=d)}),h===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:d,conv11:h}})}function LD(r,l,h,d,f){const g=r.shape[0],S=Math.min(h,g),L=l.map((O,C)=>({score:O,boxIndex:C})).filter(O=>O.score>f).sort((O,C)=>C.score-O.score),x=O=>O<=d?1:0,A=[];return L.forEach(O=>{if(A.length>=S)return;const C=O.score;for(let $=A.length-1;$>=0;--$){const z=KZ(r,O.boxIndex,A[$]);if(z===0)continue;if(O.score*=x(z),O.score<=f)break}C===O.score&&A.push(O.boxIndex)}),A}function KZ(r,l,h){const d=r.arraySync(),f=Math.min(d[l][0],d[l][2]),g=Math.min(d[l][1],d[l][3]),S=Math.max(d[l][0],d[l][2]),L=Math.max(d[l][1],d[l][3]),x=Math.min(d[h][0],d[h][2]),A=Math.min(d[h][1],d[h][3]),O=Math.max(d[h][0],d[h][2]),C=Math.max(d[h][1],d[h][3]),$=(S-f)*(L-g),z=(O-x)*(C-A);if($<=0||z<=0)return 0;const ne=Math.max(f,x),te=Math.max(g,A),se=Math.min(S,O),fe=Math.min(L,C),de=Math.max(se-ne,0)*Math.max(fe-te,0);return de/($+z-de)}const De=Ye(Je());function XZ(r){const l=De.unstack(De.transpose(r,[1,0])),h=[De.sub(l[2],l[0]),De.sub(l[3],l[1])],d=[De.add(l[0],De.div(h[0],De.scalar(2))),De.add(l[1],De.div(h[1],De.scalar(2)))];return{sizes:h,centers:d}}function 
JZ(r,l){const{sizes:h,centers:d}=XZ(r),f=De.unstack(De.transpose(l,[1,0])),g=De.div(De.mul(De.exp(De.div(f[2],De.scalar(5))),h[0]),De.scalar(2)),S=De.add(De.mul(De.div(f[0],De.scalar(10)),h[0]),d[0]),L=De.div(De.mul(De.exp(De.div(f[3],De.scalar(5))),h[1]),De.scalar(2)),x=De.add(De.mul(De.div(f[1],De.scalar(10)),h[1]),d[1]);return De.transpose(De.stack([De.sub(S,g),De.sub(x,L),De.add(S,g),De.add(x,L)]),[1,0])}function SD(r,l,h){return De.tidy(()=>{const d=r.shape[0];let f=JZ(De.reshape(De.tile(h.extra_dim,[d,1,1]),[-1,4]),De.reshape(r,[-1,4]));f=De.reshape(f,[d,f.shape[0]/d,4]);const g=De.sigmoid(De.slice(l,[0,0,1],[-1,-1,-1]));let S=De.slice(g,[0,0,0],[-1,-1,1]);S=De.reshape(S,[d,S.shape[1]]);const L=De.unstack(f),x=De.unstack(S);return{boxes:L,scores:x}})}const Yu=Ye(Je());function wa(r,l){return Yu.tidy(()=>{const h=r.shape[0],d=Yu.reshape(ga(r,l.box_encoding_predictor),[h,-1,1,4]),f=Yu.reshape(ga(r,l.class_predictor),[h,-1,3]);return{boxPredictionEncoding:d,classPrediction:f}})}const qu=Ye(Je());function ID(r,l,h){return qu.tidy(()=>{const d=Xs(r,h.conv_0,[1,1]),f=Xs(d,h.conv_1,[2,2]),g=Xs(f,h.conv_2,[1,1]),S=Xs(g,h.conv_3,[2,2]),L=Xs(S,h.conv_4,[1,1]),x=Xs(L,h.conv_5,[2,2]),A=Xs(x,h.conv_6,[1,1]),O=Xs(A,h.conv_7,[2,2]),C=wa(l,h.box_predictor_0),$=wa(r,h.box_predictor_1),z=wa(f,h.box_predictor_2),ne=wa(S,h.box_predictor_3),te=wa(x,h.box_predictor_4),se=wa(O,h.box_predictor_5),fe=qu.concat([C.boxPredictionEncoding,$.boxPredictionEncoding,z.boxPredictionEncoding,ne.boxPredictionEncoding,te.boxPredictionEncoding,se.boxPredictionEncoding],1),de=qu.concat([C.classPrediction,$.classPrediction,z.classPrediction,ne.classPrediction,te.classPrediction,se.classPrediction],1);return{boxPredictions:fe,classPredictions:de}})}class Ii{constructor({minConfidence:r,maxResults:l}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=r||.5,this._maxResults=l||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}}const xi=Ye(Je());class nl extends kn{constructor(){super("SsdMobilenetv1")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("SsdMobilenetv1 - load model before inference");return xi.tidy(()=>{const h=xi.cast(r.toBatchTensor(512,!1),"float32"),d=xi.sub(xi.mul(h,xi.scalar(.007843137718737125)),xi.scalar(1)),f=wD(d,l.mobilenetv1),{boxPredictions:g,classPredictions:S}=ID(f.out,f.conv11,l.prediction_layer);return SD(g,S,l.output_layer)})}async forward(r){return this.forwardInput(await Rt(r))}async locateFaces(r,l={}){const{maxResults:h,minConfidence:d}=new Ii(l),f=await Rt(r),{boxes:g,scores:S}=this.forwardInput(f),L=g[0],x=S[0];for(let de=1;de{const[Ae,xe]=[Math.max(0,se[de][0]),Math.min(1,se[de][2])].map(wt=>wt*te),[Me,Ke]=[Math.max(0,se[de][1]),Math.min(1,se[de][3])].map(wt=>wt*ne);return new Yt(A[de],new Su(Me,Ae,Ke-Me,xe-Ae),{height:f.getInputHeight(0),width:f.getInputWidth(0)})});return L.dispose(),x.dispose(),fe}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(r){return bD(r)}extractParams(r){return yD(r)}}function xD(r){const l=new nl;return l.extractWeights(r),l}function ZZ(r){return xD(r)}class QZ extends nl{}const TD=.4,AD=[new Ze(.738768,.874946),new Ze(2.42204,2.65704),new Ze(4.30971,7.04493),new Ze(10.246,4.59428),new 
Ze(12.6868,11.8741)],vD=[new Ze(1.603231,2.094468),new Ze(6.041143,7.080126),new Ze(2.882459,3.518061),new Ze(4.266906,5.178857),new Ze(9.041765,10.66308)],ND=[117.001,114.697,97.404],CD="tiny_yolov2_model",RD="tiny_yolov2_separable_conv_model";const xg=r=>typeof r=="number";function gx(r){if(!r)throw new Error(`invalid config: ${r}`);if(typeof r.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${r.withSeparableConvs}`);if(!xg(r.iouThreshold)||r.iouThreshold<0||r.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${r.iouThreshold}`);if(!Array.isArray(r.classes)||!r.classes.length||!r.classes.every(l=>typeof l=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(r.classes)}`);if(!Array.isArray(r.anchors)||!r.anchors.length||!r.anchors.map(l=>l||{}).every(l=>xg(l.x)&&xg(l.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(r.anchors)}`);if(r.meanRgb&&(!Array.isArray(r.meanRgb)||r.meanRgb.length!==3||!r.meanRgb.every(xg)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(r.meanRgb)}`)}const Js=Ye(Je());function sl(r){return Js.tidy(()=>{const l=Js.mul(r,Js.scalar(.10000000149011612));return Js.add(Js.relu(Js.sub(r,l)),l)})}const Zs=Ye(Je());function vr(r,l){return Zs.tidy(()=>{let h=Zs.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return h=Zs.conv2d(h,l.conv.filters,[1,1],"valid"),h=Zs.sub(h,l.bn.sub),h=Zs.mul(h,l.bn.truediv),h=Zs.add(h,l.conv.bias),sl(h)})}const bo=Ye(Je());function Nr(r,l){return bo.tidy(()=>{let h=bo.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return h=bo.separableConv2d(h,l.depthwise_filter,l.pointwise_filter,[1,1],"valid"),h=bo.add(h,l.bias),sl(h)})}const yx=Ye(Je());function eQ(r,l){const h=Kc(r,l);function d(S,L){const x=yx.tensor1d(r(S)),A=yx.tensor1d(r(S));return l.push({paramPath:`${L}/sub`},{paramPath:`${L}/truediv`}),{sub:x,truediv:A}}function f(S,L,x){const A=h(S,L,3,`${x}/conv`),O=d(L,`${x}/bn`);return{conv:A,bn:O}}const g=Xc(r,l);return{extractConvParams:h,extractConvWithBatchNormParams:f,extractSeparableConvParams:g}}function OD(r,l,h,d){const{extractWeights:f,getRemainingWeights:g}=Hn(r),S=[],{extractConvParams:L,extractConvWithBatchNormParams:x,extractSeparableConvParams:A}=eQ(f,S);let O;if(l.withSeparableConvs){const[C,$,z,ne,te,se,fe,de,Ae]=d,xe=l.isFirstLayerConv2d?L(C,$,3,"conv0"):A(C,$,"conv0"),Me=A($,z,"conv1"),Ke=A(z,ne,"conv2"),wt=A(ne,te,"conv3"),$t=A(te,se,"conv4"),Kt=A(se,fe,"conv5"),Fn=de?A(fe,de,"conv6"):void 0,vn=Ae?A(de,Ae,"conv7"):void 0,Nn=L(Ae||de||fe,5*h,1,"conv8");O={conv0:xe,conv1:Me,conv2:Ke,conv3:wt,conv4:$t,conv5:Kt,conv6:Fn,conv7:vn,conv8:Nn}}else{const[C,$,z,ne,te,se,fe,de,Ae]=d,xe=x(C,$,"conv0"),Me=x($,z,"conv1"),Ke=x(z,ne,"conv2"),wt=x(ne,te,"conv3"),$t=x(te,se,"conv4"),Kt=x(se,fe,"conv5"),Fn=x(fe,de,"conv6"),vn=x(de,Ae,"conv7"),Nn=L(Ae,5*h,1,"conv8");O={conv0:xe,conv1:Me,conv2:Ke,conv3:wt,conv4:$t,conv5:Kt,conv6:Fn,conv7:vn,conv8:Nn}}if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{params:O,paramMappings:S}}function tQ(r,l){const h=ys(r,l);function d(L){const x=h(`${L}/sub`,1),A=h(`${L}/truediv`,1);return{sub:x,truediv:A}}function f(L){const x=h(`${L}/filters`,4),A=h(`${L}/bias`,1);return{filters:x,bias:A}}function g(L){const x=f(`${L}/conv`),A=d(`${L}/bn`);return{conv:x,bn:A}}const 
S=Jc(h);return{extractConvParams:f,extractConvWithBatchNormParams:g,extractSeparableConvParams:S}}function ED(r,l){const h=[],{extractConvParams:d,extractConvWithBatchNormParams:f,extractSeparableConvParams:g}=tQ(r,h);let S;if(l.withSeparableConvs){const L=l.filterSizes&&l.filterSizes.length||9;S={conv0:l.isFirstLayerConv2d?d("conv0"):g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:L>7?g("conv6"):void 0,conv7:L>8?g("conv7"):void 0,conv8:d("conv8")}}else S={conv0:f("conv0"),conv1:f("conv1"),conv2:f("conv2"),conv3:f("conv3"),conv4:f("conv4"),conv5:f("conv5"),conv6:f("conv6"),conv7:f("conv7"),conv8:d("conv8")};return Vn(r,h),{params:S,paramMappings:h}}var bx;(function(r){r[r.XS=224]="XS",r[r.SM=320]="SM",r[r.MD=416]="MD",r[r.LG=608]="LG"})(bx||(bx={}));class Cr{constructor({inputSize:r,scoreThreshold:l}={}){this._name="TinyYolov2Options";if(this._inputSize=r||416,this._scoreThreshold=l||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}}const kt=Ye(Je());class il extends kn{constructor(r){super("TinyYolov2");gx(r),this._config=r}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(r,l){let h=vr(r,l.conv0);return h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv1),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv2),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv3),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv4),h=kt.maxPool(h,[2,2],[2,2],"same"),h=vr(h,l.conv5),h=kt.maxPool(h,[2,2],[1,1],"same"),h=vr(h,l.conv6),h=vr(h,l.conv7),ga(h,l.conv8,"valid",!1)}runMobilenet(r,l){let h=this.config.isFirstLayerConv2d?sl(ga(r,l.conv0,"valid",!1)):Nr(r,l.conv0);return h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv1),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv2),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv3),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv4),h=kt.maxPool(h,[2,2],[2,2],"same"),h=Nr(h,l.conv5),h=kt.maxPool(h,[2,2],[1,1],"same"),h=l.conv6?Nr(h,l.conv6):h,h=l.conv7?Nr(h,l.conv7):h,ga(h,l.conv8,"valid",!1)}forwardInput(r,l){const{params:h}=this;if(!h)throw new Error("TinyYolov2 - load model before inference");return kt.tidy(()=>{let d=kt.cast(r.toBatchTensor(l,!1),"float32");return d=this.config.meanRgb?yi(d,this.config.meanRgb):d,d=d.div(kt.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(d,h):this.runTinyYolov2(d,h)})}async forward(r,l){return await this.forwardInput(await Rt(r),l)}async detect(r,l={}){const{inputSize:h,scoreThreshold:d}=new Cr(l),f=await Rt(r),g=await this.forwardInput(f,h),S=kt.tidy(()=>kt.unstack(g)[0].expandDims()),L={width:f.getInputWidth(0),height:f.getInputHeight(0)},x=await this.extractBoxes(S,f.getReshapedInputDimensions(0),d);g.dispose(),S.dispose();const A=x.map(te=>te.box),O=x.map(te=>te.score),C=x.map(te=>te.classScore),$=x.map(te=>this.config.classes[te.label]),z=VS(A.map(te=>te.rescale(h)),O,this.config.iouThreshold,!0),ne=z.map(te=>new Oc(O[te],C[te],$[te],A[te],L));return ne}getDefaultModelName(){return""}extractParamsFromWeigthMap(r){return 
ED(r,this.config)}extractParams(r){const l=this.config.filterSizes||il.DEFAULT_FILTER_SIZES,h=l?l.length:void 0;if(h!==7&&h!==8&&h!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${h} filterSizes in config`);return OD(r,this.config,this.boxEncodingSize,l)}async extractBoxes(r,l,h){const{width:d,height:f}=l,g=Math.max(d,f),S=g/d,L=g/f,x=r.shape[1],A=this.config.anchors.length,[O,C,$]=kt.tidy(()=>{const se=r.reshape([x,x,A,this.boxEncodingSize]),fe=se.slice([0,0,0,0],[x,x,A,4]),de=se.slice([0,0,0,4],[x,x,A,1]),Ae=this.withClassScores?kt.softmax(se.slice([0,0,0,5],[x,x,A,this.config.classes.length]),3):kt.scalar(0);return[fe,de,Ae]}),z=[],ne=await C.array(),te=await O.array();for(let se=0;se<x;se++)for(let fe=0;fe<x;fe++)for(let de=0;de<A;de++){const Ae=Lu(ne[se][fe][de][0]);if(!h||Ae>h){const xe=(fe+Lu(te[se][fe][de][0]))/x*S,Me=(se+Lu(te[se][fe][de][1]))/x*L,Ke=Math.exp(te[se][fe][de][2])*this.config.anchors[de].x/x*S,wt=Math.exp(te[se][fe][de][3])*this.config.anchors[de].y/x*L,$t=xe-Ke/2,Kt=Me-wt/2,Fn={row:se,col:fe,anchor:de},{classScore:vn,label:Nn}=this.withClassScores?await this.extractPredictedClass($,Fn):{classScore:1,label:0};z.push({box:new wu($t,Kt,$t+Ke,Kt+wt),score:Ae,classScore:Ae*vn,label:Nn,...Fn})}}return O.dispose(),C.dispose(),$.dispose(),z}async extractPredictedClass(r,l){const{row:h,col:d,anchor:f}=l,g=await r.array();return Array(this.config.classes.length).fill(0).map((S,L)=>g[h][d][f][L]).map((S,L)=>({classScore:S,label:L})).reduce((S,L)=>S.classScore>L.classScore?S:L)}}il.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];class ju extends il{constructor(r=!0){const l=Object.assign({},{withSeparableConvs:r,iouThreshold:TD,classes:["face"]},r?{anchors:vD,meanRgb:ND}:{anchors:AD,withClassScores:!0});super(l)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(r,l){const h=await this.detect(r,l);return h.map(d=>new Yt(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?RD:CD}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}function nQ(r,l=!0){const h=new ju(l);return h.extractWeights(r),h}class wx extends Cr{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}}class Ti{async then(r){return r(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}}const Lx=Ye(Je());async function La(r,l,h,d,f=({alignedRect:g})=>g){const g=r.map(x=>ba(x)?f(x):x.detection),S=d||(l instanceof Lx.Tensor?await qc(l,g):await Yc(l,g)),L=await h(S);return S.forEach(x=>x instanceof Lx.Tensor&&x.dispose()),L}async function rl(r,l,h,d,f){return La([r],l,async g=>h(g[0]),d,f)}const DD=.4,kD=[new Ze(1.603231,2.094468),new Ze(6.041143,7.080126),new Ze(2.882459,3.518061),new Ze(4.266906,5.178857),new Ze(9.041765,10.66308)],FD=[117.001,114.697,97.404];class Ku extends il{constructor(){const r={withSeparableConvs:!0,iouThreshold:DD,classes:["face"],anchors:kD,meanRgb:FD,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(r)}get anchors(){return this.config.anchors}async locateFaces(r,l){const h=await this.detect(r,l);return h.map(d=>new Yt(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}const pt={ssdMobilenetv1:new nl,tinyFaceDetector:new Ku,tinyYolov2:new ju,faceLandmark68Net:new Gu,faceLandmark68TinyNet:new px,faceRecognitionNet:new Hu,faceExpressionNet:new cx,ageGenderNet:new 
dx},_D=(r,l)=>pt.ssdMobilenetv1.locateFaces(r,l),sQ=(r,l)=>pt.tinyFaceDetector.locateFaces(r,l),iQ=(r,l)=>pt.tinyYolov2.locateFaces(r,l),WD=r=>pt.faceLandmark68Net.detectLandmarks(r),rQ=r=>pt.faceLandmark68TinyNet.detectLandmarks(r),oQ=r=>pt.faceRecognitionNet.computeFaceDescriptor(r),aQ=r=>pt.faceExpressionNet.predictExpressions(r),cQ=r=>pt.ageGenderNet.predictAgeAndGender(r),$D=r=>pt.ssdMobilenetv1.load(r),lQ=r=>pt.tinyFaceDetector.load(r),hQ=r=>pt.tinyYolov2.load(r),uQ=r=>pt.faceLandmark68Net.load(r),dQ=r=>pt.faceLandmark68TinyNet.load(r),pQ=r=>pt.faceRecognitionNet.load(r),mQ=r=>pt.faceExpressionNet.load(r),fQ=r=>pt.ageGenderNet.load(r),gQ=$D,yQ=_D,bQ=WD;class UD extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.extractedFaces=h}}class Zu extends UD{async run(){const r=await this.parentTask,l=await La(r,this.input,async h=>await Promise.all(h.map(d=>pt.faceExpressionNet.predictExpressions(d))),this.extractedFaces);return r.map((h,d)=>gg(h,l[d]))}withAgeAndGender(){return new Xu(this,this.input)}}class Qu extends UD{async run(){const r=await this.parentTask;if(!r)return;const l=await rl(r,this.input,h=>pt.faceExpressionNet.predictExpressions(h),this.extractedFaces);return gg(r,l)}withAgeAndGender(){return new Ju(this,this.input)}}class cl extends Zu{withAgeAndGender(){return new ol(this,this.input)}withFaceDescriptors(){return new Sa(this,this.input)}}class ll extends Qu{withAgeAndGender(){return new al(this,this.input)}withFaceDescriptor(){return new Ia(this,this.input)}}class BD extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.extractedFaces=h}}class Xu extends BD{async run(){const r=await this.parentTask,l=await La(r,this.input,async h=>await Promise.all(h.map(d=>pt.ageGenderNet.predictAgeAndGender(d))),this.extractedFaces);return r.map((h,d)=>{const{age:f,gender:g,genderProbability:S}=l[d];return Sg(Ig(h,g,S),f)})}withFaceExpressions(){return new Zu(this,this.input)}}class Ju extends BD{async run(){const r=await this.parentTask;if(!r)return;const{age:l,gender:h,genderProbability:d}=await rl(r,this.input,f=>pt.ageGenderNet.predictAgeAndGender(f),this.extractedFaces);return Sg(Ig(r,h,d),l)}withFaceExpressions(){return new Qu(this,this.input)}}class ol extends Xu{withFaceExpressions(){return new cl(this,this.input)}withFaceDescriptors(){return new Sa(this,this.input)}}class al extends Ju{withFaceExpressions(){return new ll(this,this.input)}withFaceDescriptor(){return new Ia(this,this.input)}}class Sx extends Ti{constructor(r,l){super();this.parentTask=r;this.input=l}}class Sa extends Sx{async run(){const r=await this.parentTask,l=await La(r,this.input,h=>Promise.all(h.map(d=>pt.faceRecognitionNet.computeFaceDescriptor(d))),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return l.map((h,d)=>Lg(r[d],h))}withFaceExpressions(){return new cl(this,this.input)}withAgeAndGender(){return new ol(this,this.input)}}class Ia extends Sx{async run(){const r=await this.parentTask;if(!r)return;const l=await rl(r,this.input,h=>pt.faceRecognitionNet.computeFaceDescriptor(h),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return Lg(r,l)}withFaceExpressions(){return new ll(this,this.input)}withAgeAndGender(){return new al(this,this.input)}}const ed=Ye(Je());class Ix extends Ti{constructor(r,l,h){super();this.parentTask=r;this.input=l;this.useTinyLandmarkNet=h}get landmarkNet(){return this.useTinyLandmarkNet?pt.faceLandmark68TinyNet:pt.faceLandmark68Net}}class xx extends Ix{async run(){const r=await 
this.parentTask,l=r.map(f=>f.detection),h=this.input instanceof ed.Tensor?await qc(this.input,l):await Yc(this.input,l),d=await Promise.all(h.map(f=>this.landmarkNet.detectLandmarks(f)));return h.forEach(f=>f instanceof ed.Tensor&&f.dispose()),r.map((f,g)=>el(f,d[g]))}withFaceExpressions(){return new cl(this,this.input)}withAgeAndGender(){return new ol(this,this.input)}withFaceDescriptors(){return new Sa(this,this.input)}}class Tx extends Ix{async run(){const r=await this.parentTask;if(!r)return;const{detection:l}=r,h=this.input instanceof ed.Tensor?await qc(this.input,[l]):await Yc(this.input,[l]),d=await this.landmarkNet.detectLandmarks(h[0]);return h.forEach(f=>f instanceof ed.Tensor&&f.dispose()),el(r,d)}withFaceExpressions(){return new ll(this,this.input)}withAgeAndGender(){return new al(this,this.input)}withFaceDescriptor(){return new Ia(this,this.input)}}class Ax extends Ti{constructor(r,l=new Ii){super();this.input=r;this.options=l}}class Tg extends Ax{async run(){const{input:r,options:l}=this,h=l instanceof wx?d=>pt.tinyFaceDetector.locateFaces(d,l):l instanceof Ii?d=>pt.ssdMobilenetv1.locateFaces(d,l):l instanceof Cr?d=>pt.tinyYolov2.locateFaces(d,l):null;if(!h)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return h(r)}runAndExtendWithFaceDetections(){return new Promise(async r=>{const l=await this.run();return r(l.map(h=>sa({},h)))})}withFaceLandmarks(r=!1){return new xx(this.runAndExtendWithFaceDetections(),this.input,r)}withFaceExpressions(){return new Zu(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new Xu(this.runAndExtendWithFaceDetections(),this.input)}}class vx extends Ax{async run(){const r=await new Tg(this.input,this.options);let l=r[0];return r.forEach(h=>{h.score>l.score&&(l=h)}),l}runAndExtendWithFaceDetection(){return new Promise(async r=>{const l=await this.run();return r(l?sa({},l):void 0)})}withFaceLandmarks(r=!1){return new Tx(this.runAndExtendWithFaceDetection(),this.input,r)}withFaceExpressions(){return new Qu(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new Ju(this.runAndExtendWithFaceDetection(),this.input)}}function wQ(r,l=new Ii){return new vx(r,l)}function Ag(r,l=new Ii){return new Tg(r,l)}async function MD(r,l){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await Ag(r,new Ii(l?{minConfidence:l}:{})).withFaceLandmarks().withFaceDescriptors()}async function LQ(r,l={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead"),await Ag(r,new Cr(l)).withFaceLandmarks().withFaceDescriptors()}const SQ=MD;function Nx(r,l){if(r.length!==l.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");const h=Array.from(r),d=Array.from(l);return Math.sqrt(h.map((f,g)=>f-d[g]).reduce((f,g)=>f+Math.pow(g,2),0))}class PD{constructor(r,l=.6){this._distanceThreshold=l;const h=Array.isArray(r)?r:[r];if(!h.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let d=1;const f=()=>`person ${d++}`;this._labeledDescriptors=h.map(g=>{if(g instanceof na)return g;if(g instanceof Float32Array)return new na(f(),[g]);if(g.descriptor&&g.descriptor instanceof Float32Array)return new na(f(),[g.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | 
Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(r,l){return l.map(h=>Nx(h,r)).reduce((h,d)=>h+d,0)/(l.length||1)}matchDescriptor(r){return this.labeledDescriptors.map(({descriptors:l,label:h})=>new km(h,this.computeMeanDistance(r,l))).reduce((l,h)=>l.distancer.toJSON())}}static fromJSON(r){const l=r.labeledDescriptors.map(h=>na.fromJSON(h));return new PD(l,r.distanceThreshold)}}function IQ(r){const l=new Ku;return l.extractWeights(r),l}function zD(r,l){const{width:h,height:d}=new ms(l.width,l.height);if(h<=0||d<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:h,height:d})}`);if(Array.isArray(r))return r.map(f=>zD(f,{width:h,height:d}));if(ba(r)){const f=r.detection.forSize(h,d),g=r.unshiftedLandmarks.forSize(f.box.width,f.box.height);return el(sa(r,f),g)}return Vi(r)?sa(r,r.detection.forSize(h,d)):r instanceof qs||r instanceof Yt?r.forSize(h,d):r}var GD="0.8.1";vc(exports,{AgeGenderNet:()=>dx,BoundingBox:()=>wu,Box:()=>Ct,ComposableTask:()=>Ti,ComputeAllFaceDescriptorsTask:()=>Sa,ComputeFaceDescriptorsTaskBase:()=>Sx,ComputeSingleFaceDescriptorTask:()=>Ia,DetectAllFaceLandmarksTask:()=>xx,DetectAllFacesTask:()=>Tg,DetectFaceLandmarksTaskBase:()=>Ix,DetectFacesTaskBase:()=>Ax,DetectSingleFaceLandmarksTask:()=>Tx,DetectSingleFaceTask:()=>vx,Dimensions:()=>ms,FACE_EXPRESSION_LABELS:()=>ax,FaceDetection:()=>Yt,FaceDetectionNet:()=>QZ,FaceExpressionNet:()=>cx,FaceExpressions:()=>ya,FaceLandmark68Net:()=>Gu,FaceLandmark68TinyNet:()=>px,FaceLandmarkNet:()=>UZ,FaceLandmarks:()=>qs,FaceLandmarks5:()=>BX,FaceLandmarks68:()=>Iu,FaceMatch:()=>km,FaceMatcher:()=>PD,FaceRecognitionNet:()=>Hu,Gender:()=>Tr,LabeledBox:()=>Fm,LabeledFaceDescriptors:()=>na,NetInput:()=>po,NeuralNetwork:()=>kn,ObjectDetection:()=>Oc,Point:()=>Ze,PredictedBox:()=>MX,Rect:()=>Su,SsdMobilenetv1:()=>nl,SsdMobilenetv1Options:()=>Ii,TinyFaceDetector:()=>Ku,TinyFaceDetectorOptions:()=>wx,TinyYolov2:()=>ju,TinyYolov2Options:()=>Cr,TinyYolov2SizeType:()=>bx,allFaces:()=>SQ,allFacesSsdMobilenetv1:()=>MD,allFacesTinyYolov2:()=>LQ,awaitMediaLoaded:()=>QS,bufferToImage:()=>eI,computeFaceDescriptor:()=>oQ,createCanvas:()=>Dc,createCanvasFromMedia:()=>Tu,createFaceDetectionNet:()=>ZZ,createFaceRecognitionNet:()=>PZ,createSsdMobilenetv1:()=>xD,createTinyFaceDetector:()=>IQ,createTinyYolov2:()=>nQ,detectAllFaces:()=>Ag,detectFaceLandmarks:()=>WD,detectFaceLandmarksTiny:()=>rQ,detectLandmarks:()=>bQ,detectSingleFace:()=>wQ,draw:()=>hx,env:()=>gt,euclideanDistance:()=>Nx,extendWithAge:()=>Sg,extendWithFaceDescriptor:()=>Lg,extendWithFaceDetection:()=>sa,extendWithFaceExpressions:()=>gg,extendWithFaceLandmarks:()=>el,extendWithGender:()=>Ig,extractFaceTensors:()=>qc,extractFaces:()=>Yc,fetchImage:()=>OZ,fetchJson:()=>ix,fetchNetWeights:()=>EZ,fetchOrThrow:()=>fa,getContext2dOrThrow:()=>es,getMediaDimensions:()=>ra,imageTensorToCanvas:()=>tI,imageToSquare:()=>sx,inverseSigmoid:()=>_X,iou:()=>zS,isMediaElement:()=>Um,isMediaLoaded:()=>xu,isWithAge:()=>zZ,isWithFaceDetection:()=>Vi,isWithFaceExpressions:()=>lx,isWithFaceLandmarks:()=>ba,isWithGender:()=>GZ,loadAgeGenderModel:()=>fQ,loadFaceDetectionModel:()=>gQ,loadFaceExpressionModel:()=>mQ,loadFaceLandmarkModel:()=>uQ,loadFaceLandmarkTinyModel:()=>dQ,loadFaceRecognitionModel:()=>pQ,loadSsdMobilenetv1Model:()=>$D,loadTinyFaceDetectorModel:()=>lQ,loadTinyYolov2Model:()=>hQ,loadWeightMap:()=>rx,locateFaces:()=>yQ,matchDimensions:()=>DZ,minBbox:()=>GS,nets:()=>pt,nonMax
Suppression:()=>VS,normalize:()=>yi,padToSquare:()=>HS,predictAgeAndGender:()=>cQ,recognizeFaceExpressions:()=>aQ,resizeResults:()=>zD,resolveInput:()=>ia,shuffleArray:()=>FX,sigmoid:()=>Lu,ssdMobilenetv1:()=>_D,tf:()=>xQ,tinyFaceDetector:()=>sQ,tinyYolov2:()=>iQ,toNetInput:()=>Rt,utils:()=>US,validateConfig:()=>gx,version:()=>vQ});const xQ=Ye(Je()),TQ=typeof process!="undefined",AQ=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",vQ={faceapi:GD,node:TQ,browser:AQ}; /** * @license * Copyright 2017 Google LLC. All Rights Reserved. diff --git a/dist/face-api.cjs.map b/dist/face-api.cjs.map index 3bc7f40..61ce4c3 100644 --- a/dist/face-api.cjs.map +++ b/dist/face-api.cjs.map @@ -1,7 +1,7 @@ { "version": 3, "sources": ["node_modules/node-fetch/lib/index.mjs", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/environment.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/global_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_names.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/profiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_format.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/engine.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/device_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/flags.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/operation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/model_management.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/print.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/progress.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/http.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/test_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/globals.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/any.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/diag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/eye.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fill.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moments.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/alea.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor128.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorwow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorshift7.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor4096.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/tychei.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/seedrandom.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/range.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sin.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unsorted_segment_sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/variable.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/compare.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/binary_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hamming_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hann_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/frame.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/stft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/crop_and_resize.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/flip_left_right.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/rotate_with_offset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/array_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded_async.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/band_part.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/gram_schmidt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/qr.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/compute_weighted_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/absolute_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/cosine_distance.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/hinge_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/huber_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/log_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/mean_squared_error.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/sigmoid_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/softmax_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adadelta_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adagrad_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adam_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adamax_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/sgd_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/momentum_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/rmsprop_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer_constructors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/train.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/browser_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rotate_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/array_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/complex_util.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/split_shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/tile_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/topk_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Abs_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cast_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Ceil_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ClipByValue_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Dilation2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Erf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Exp_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Expm1_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Floor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GatherV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsFinite_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsInf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsNan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log1p_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LogSoftmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/min_max_grad_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Min_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Negate_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OnesLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Prelu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reciprocal_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reshape_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeBilinear_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeNearestNeighbor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reverse_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Round_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Rsqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SelectV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sigmoid_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sign_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Slice_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softplus_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Step_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Unpack_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/UnsortedSegmentSum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ZerosLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/any.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_type.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batch_to_space_nd.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2D_deprecated.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/flatten.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floorDiv.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_and.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mean.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape_as.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/selu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_bool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_float.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_int.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unsorted_segment_sum.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/register_all_chained_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/errors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/generic_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/math_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/tfjs_backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/initializer_config.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/state.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/types_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/variable_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/variables.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/topology.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/input_layer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/logs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/base_callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/losses.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/optimizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/user_defined_metadata.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/layer_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/serialization_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/executor.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/container.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_tensors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/advanced_activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/conv_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/core.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/embeddings.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/merge.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/noise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/padding.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/pooling.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/wrappers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_layers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/data/compiled_api.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/register.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/arithmetic.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/basic_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/control.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/convolution.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/creation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/dynamic.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/evaluation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/graph.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/image.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/logical.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/matrices.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/reduction.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/slice_join.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/spectral.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/transformation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_mapper.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/node_value_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops_for_converter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/arithmetic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/basic_math_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_array.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_list.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/control_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/convolution_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/creation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/dynamic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/evaluation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/graph_executor.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/image_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/logical_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/matrices_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/normalization_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/reduction_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/slice_join_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/spectral_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/transformation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/execution_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/model_analysis.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_model.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/text_line_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/csv_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/microphone_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/webcam_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasource.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/string_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/byte_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/file_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/url_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/source_util.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/file_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/url_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/readers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/cpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/backend_cpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/binary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/kernel_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Multiply.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acosh.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/pool_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Clip.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropFilter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropInput.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fft_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FlipLeftRight.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IFFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsFinite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsInf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsNaN.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/LogicalNot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolBackprop.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV5.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NotEqual.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/PadV2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SpaceToBatchND.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/canvas_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tex_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flags_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/packing_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/glsl_version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/avg_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_complex_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/complex_abs_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_packed_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/crop_and_resize_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/cumsum_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/depth_to_space_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/diag_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fft_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fill_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_nd_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/im2col_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_grad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/max_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mulmat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/multinomial_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/onehot_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pool_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reduce_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reshape_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/scatter_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/segment_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/select_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_packed_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/strided_slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/texture_manager.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tile_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unpack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/backend_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/kernel_funcs_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flip_left_right_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FlipLeftRight.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reduce.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_packed_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV3.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV5.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/rotate_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../src/version.ts", "node_modules/@tensorflow/tfjs/dist/../src/index.ts", "src/env/isNodejs.ts", "src/draw/drawContour.ts", "src/classes/Dimensions.ts", "src/utils/index.ts", "src/classes/Point.ts", "src/classes/Box.ts", "src/classes/BoundingBox.ts", "src/classes/ObjectDetection.ts", "src/classes/FaceDetection.ts", "src/ops/iou.ts", "src/ops/minBbox.ts", "src/ops/nonMaxSuppression.ts", "src/ops/normalize.ts", "src/ops/padToSquare.ts", "src/ops/shuffleArray.ts", "src/ops/index.ts", "src/classes/Rect.ts", "src/classes/FaceLandmarks.ts", "src/classes/FaceLandmarks5.ts", "src/classes/FaceLandmarks68.ts", "src/classes/FaceMatch.ts", "src/classes/LabeledBox.ts", "src/classes/LabeledFaceDescriptors.ts", "src/classes/PredictedBox.ts", "src/factories/WithFaceDetection.ts", "src/env/createBrowserEnv.ts", "src/env/createFileSystem.ts", "src/env/createNodejsEnv.ts", "src/env/isBrowser.ts", "src/env/index.ts", "src/dom/resolveInput.ts", "src/dom/getContext2dOrThrow.ts", "src/draw/DrawTextField.ts", "src/draw/DrawBox.ts", "src/draw/drawDetections.ts", "src/dom/isMediaLoaded.ts", "src/dom/awaitMediaLoaded.ts", "src/dom/bufferToImage.ts", "src/dom/getMediaDimensions.ts", "src/dom/createCanvas.ts", "src/dom/imageTensorToCanvas.ts", "src/dom/isMediaElement.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/backend.ts", 
"node_modules/@tensorflow/tfjs-core/dist/../src/environment.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/global_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_names.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_registry.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/profiler.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tape.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_format.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/types.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/engine.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/device_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/flags.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util_env.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/operation.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/complex.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/buffer.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cast.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/clone.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/print.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/base_side_effects.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reshape.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mat_mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/transpose.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor3d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/browser.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/globals.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/add.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floorDiv.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/div.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/abs.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/axis_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sigmoid.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/batch_to_space_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_to.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cos.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cosh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cumsum.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/where.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros_like.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/expand_dims.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tile.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/eye.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/fill.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/segment_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/gather.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/imag.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/maximum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/scalar.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log1p.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/neg.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sub.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log_sum_exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_and.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_not.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ones.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mean.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/min.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/minimum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/square.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/not_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/real.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/space_to_batch_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pow.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor1d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/range.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/relu.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reverse.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/rsqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sin.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sinh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/fft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/ifft.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/irfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/rfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squared_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squeeze.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/stack.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/step.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unsorted_segment_sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unstack.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/norm.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/signal_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hamming_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hann_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/frame.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/stft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/crop_and_resize.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/flip_left_right.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/rotate_with_offset.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/nonmax_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/array_util.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/non_max_suppression_impl.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_bilinear.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_nearest_neighbor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/band_part.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/gram_schmidt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/qr.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/loss_ops_utils.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/compute_weighted_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/absolute_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/cosine_distance.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/hinge_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/huber_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/log_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/mean_squared_error.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/sigmoid_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/softmax_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ops.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/selu_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Abs_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acos_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Add_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AddN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchMatMul_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchToSpaceND_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BroadcastTo_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cast_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Ceil_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ClipByValue_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Concat_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2DBackpropInput_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cos_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cumsum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/DepthwiseConv2dNative_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Dilation2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Div_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Elu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Erf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Exp_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Expm1_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Floor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FloorDiv_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FusedBatchNorm_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GatherV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GreaterEqual_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Identity_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsFinite_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsInf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsNan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log1p_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LogSoftmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/local_response_normalization_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LRN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/min_max_grad_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Max_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Maximum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Min_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Minimum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Mod_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Multiply_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Negate_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OneHot_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OnesLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/PadV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Pow_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Prelu_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reciprocal_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu6_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reshape_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeBilinear_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeNearestNeighbor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reverse_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Round_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Rsqrt_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SelectV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Selu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sigmoid_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sign_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Slice_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softplus_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SpaceToBatchND_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SplitV_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sqrt_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Square_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SquaredDifference_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Step_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sub_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tile_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Transpose_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Unpack_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/UnsortedSegmentSum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ZerosLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/register_all_gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/index.ts", "src/dom/imageToSquare.ts", "src/dom/NetInput.ts", "src/dom/toNetInput.ts", "src/dom/extractFaces.ts", "src/dom/extractFaceTensors.ts", "src/dom/fetchOrThrow.ts", "src/dom/fetchImage.ts", "src/dom/fetchJson.ts", "src/dom/fetchNetWeights.ts", "src/common/getModelUris.ts", "src/dom/loadWeightMap.ts", "src/dom/matchDimensions.ts", "src/NeuralNetwork.ts", "src/common/depthwiseSeparableConv.ts", "src/faceFeatureExtractor/denseBlock.ts", "src/common/convLayer.ts", 
"src/common/disposeUnusedWeightTensors.ts", "src/common/extractConvParamsFactory.ts", "src/common/extractFCParamsFactory.ts", "src/common/types.ts", "src/common/extractSeparableConvParamsFactory.ts", "src/common/extractWeightEntryFactory.ts", "src/common/extractWeightsFactory.ts", "src/faceFeatureExtractor/extractorsFactory.ts", "src/faceFeatureExtractor/extractParams.ts", "src/common/loadConvParamsFactory.ts", "src/faceFeatureExtractor/loadParamsFactory.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMap.ts", "src/faceFeatureExtractor/FaceFeatureExtractor.ts", "src/common/fullyConnectedLayer.ts", "src/faceProcessor/extractParams.ts", "src/faceProcessor/extractParamsFromWeigthMap.ts", "src/faceProcessor/util.ts", "src/faceProcessor/FaceProcessor.ts", "src/faceExpressionNet/FaceExpressions.ts", "src/faceExpressionNet/FaceExpressionNet.ts", "src/factories/WithFaceExpressions.ts", "src/draw/drawFaceExpressions.ts", "src/factories/WithFaceLandmarks.ts", "src/draw/DrawFaceLandmarks.ts", "src/draw/index.ts", "src/xception/extractParams.ts", "src/xception/extractParamsFromWeigthMap.ts", "src/xception/TinyXception.ts", "src/ageGenderNet/extractParams.ts", "src/ageGenderNet/extractParamsFromWeigthMap.ts", "src/ageGenderNet/types.ts", "src/ageGenderNet/AgeGenderNet.ts", "src/faceLandmarkNet/FaceLandmark68NetBase.ts", "src/faceLandmarkNet/FaceLandmark68Net.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMapTiny.ts", "src/faceFeatureExtractor/extractParamsTiny.ts", "src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "src/faceLandmarkNet/index.ts", "src/faceRecognitionNet/scaleLayer.ts", "src/faceRecognitionNet/convLayer.ts", "src/faceRecognitionNet/extractParams.ts", "src/faceRecognitionNet/extractParamsFromWeigthMap.ts", "src/faceRecognitionNet/residualLayer.ts", "src/faceRecognitionNet/FaceRecognitionNet.ts", "src/faceRecognitionNet/index.ts", "src/factories/WithFaceDescriptor.ts", "src/factories/WithAge.ts", "src/factories/WithGender.ts", "src/ssdMobilenetv1/extractParams.ts", "src/ssdMobilenetv1/extractParamsFromWeigthMap.ts", "src/ssdMobilenetv1/pointwiseConvLayer.ts", "src/ssdMobilenetv1/mobileNetV1.ts", "src/ssdMobilenetv1/nonMaxSuppression.ts", "src/ssdMobilenetv1/outputLayer.ts", "src/ssdMobilenetv1/boxPredictionLayer.ts", "src/ssdMobilenetv1/predictionLayer.ts", "src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "src/ssdMobilenetv1/SsdMobilenetv1.ts", "src/ssdMobilenetv1/index.ts", "src/tinyYolov2/const.ts", "src/tinyYolov2/config.ts", "src/tinyYolov2/leaky.ts", "src/tinyYolov2/convWithBatchNorm.ts", "src/tinyYolov2/depthwiseSeparableConv.ts", "src/tinyYolov2/extractParams.ts", "src/tinyYolov2/extractParamsFromWeigthMap.ts", "src/tinyYolov2/TinyYolov2Options.ts", "src/tinyYolov2/TinyYolov2Base.ts", "src/tinyYolov2/TinyYolov2.ts", "src/tinyYolov2/index.ts", "src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "src/globalApi/ComposableTask.ts", "src/globalApi/extractFacesAndComputeResults.ts", "src/tinyFaceDetector/const.ts", "src/tinyFaceDetector/TinyFaceDetector.ts", "src/globalApi/nets.ts", "src/globalApi/PredictFaceExpressionsTask.ts", "src/globalApi/PredictAgeAndGenderTask.ts", "src/globalApi/ComputeFaceDescriptorsTasks.ts", "src/globalApi/DetectFaceLandmarksTasks.ts", "src/globalApi/DetectFacesTasks.ts", "src/globalApi/detectFaces.ts", "src/globalApi/allFaces.ts", "src/euclideanDistance.ts", "src/globalApi/FaceMatcher.ts", "src/tinyFaceDetector/index.ts", "src/resizeResults.ts", "src/index.ts"], - "sourcesContent": ["import Stream 
from 'stream';\nimport http from 'http';\nimport Url from 'url';\nimport https from 'https';\nimport zlib from 'zlib';\n\n// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js\n\n// fix for \"Readable\" isn't a named export issue\nconst Readable = Stream.Readable;\n\nconst BUFFER = Symbol('buffer');\nconst TYPE = Symbol('type');\n\nclass Blob {\n\tconstructor() {\n\t\tthis[TYPE] = '';\n\n\t\tconst blobParts = arguments[0];\n\t\tconst options = arguments[1];\n\n\t\tconst buffers = [];\n\t\tlet size = 0;\n\n\t\tif (blobParts) {\n\t\t\tconst a = blobParts;\n\t\t\tconst length = Number(a.length);\n\t\t\tfor (let i = 0; i < length; i++) {\n\t\t\t\tconst element = a[i];\n\t\t\t\tlet buffer;\n\t\t\t\tif (element instanceof Buffer) {\n\t\t\t\t\tbuffer = element;\n\t\t\t\t} else if (ArrayBuffer.isView(element)) {\n\t\t\t\t\tbuffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength);\n\t\t\t\t} else if (element instanceof ArrayBuffer) {\n\t\t\t\t\tbuffer = Buffer.from(element);\n\t\t\t\t} else if (element instanceof Blob) {\n\t\t\t\t\tbuffer = element[BUFFER];\n\t\t\t\t} else {\n\t\t\t\t\tbuffer = Buffer.from(typeof element === 'string' ? element : String(element));\n\t\t\t\t}\n\t\t\t\tsize += buffer.length;\n\t\t\t\tbuffers.push(buffer);\n\t\t\t}\n\t\t}\n\n\t\tthis[BUFFER] = Buffer.concat(buffers);\n\n\t\tlet type = options && options.type !== undefined && String(options.type).toLowerCase();\n\t\tif (type && !/[^\\u0020-\\u007E]/.test(type)) {\n\t\t\tthis[TYPE] = type;\n\t\t}\n\t}\n\tget size() {\n\t\treturn this[BUFFER].length;\n\t}\n\tget type() {\n\t\treturn this[TYPE];\n\t}\n\ttext() {\n\t\treturn Promise.resolve(this[BUFFER].toString());\n\t}\n\tarrayBuffer() {\n\t\tconst buf = this[BUFFER];\n\t\tconst ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n\t\treturn Promise.resolve(ab);\n\t}\n\tstream() {\n\t\tconst readable = new Readable();\n\t\treadable._read = function () {};\n\t\treadable.push(this[BUFFER]);\n\t\treadable.push(null);\n\t\treturn readable;\n\t}\n\ttoString() {\n\t\treturn '[object Blob]';\n\t}\n\tslice() {\n\t\tconst size = this.size;\n\n\t\tconst start = arguments[0];\n\t\tconst end = arguments[1];\n\t\tlet relativeStart, relativeEnd;\n\t\tif (start === undefined) {\n\t\t\trelativeStart = 0;\n\t\t} else if (start < 0) {\n\t\t\trelativeStart = Math.max(size + start, 0);\n\t\t} else {\n\t\t\trelativeStart = Math.min(start, size);\n\t\t}\n\t\tif (end === undefined) {\n\t\t\trelativeEnd = size;\n\t\t} else if (end < 0) {\n\t\t\trelativeEnd = Math.max(size + end, 0);\n\t\t} else {\n\t\t\trelativeEnd = Math.min(end, size);\n\t\t}\n\t\tconst span = Math.max(relativeEnd - relativeStart, 0);\n\n\t\tconst buffer = this[BUFFER];\n\t\tconst slicedBuffer = buffer.slice(relativeStart, relativeStart + span);\n\t\tconst blob = new Blob([], { type: arguments[2] });\n\t\tblob[BUFFER] = slicedBuffer;\n\t\treturn blob;\n\t}\n}\n\nObject.defineProperties(Blob.prototype, {\n\tsize: { enumerable: true },\n\ttype: { enumerable: true },\n\tslice: { enumerable: true }\n});\n\nObject.defineProperty(Blob.prototype, Symbol.toStringTag, {\n\tvalue: 'Blob',\n\twritable: false,\n\tenumerable: false,\n\tconfigurable: true\n});\n\n/**\n * fetch-error.js\n *\n * FetchError interface for operational errors\n */\n\n/**\n * Create FetchError instance\n *\n * @param String message Error message for human\n * @param String type Error type for machine\n * @param String systemError For Node.js system error\n * 
@return FetchError\n */\nfunction FetchError(message, type, systemError) {\n Error.call(this, message);\n\n this.message = message;\n this.type = type;\n\n // when err.type is `system`, err.code contains system error code\n if (systemError) {\n this.code = this.errno = systemError.code;\n }\n\n // hide custom error implementation details from end-users\n Error.captureStackTrace(this, this.constructor);\n}\n\nFetchError.prototype = Object.create(Error.prototype);\nFetchError.prototype.constructor = FetchError;\nFetchError.prototype.name = 'FetchError';\n\nlet convert;\ntry {\n\tconvert = require('encoding').convert;\n} catch (e) {}\n\nconst INTERNALS = Symbol('Body internals');\n\n// fix an issue where \"PassThrough\" isn't a named export for node <10\nconst PassThrough = Stream.PassThrough;\n\n/**\n * Body mixin\n *\n * Ref: https://fetch.spec.whatwg.org/#body\n *\n * @param Stream body Readable stream\n * @param Object opts Response options\n * @return Void\n */\nfunction Body(body) {\n\tvar _this = this;\n\n\tvar _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},\n\t _ref$size = _ref.size;\n\n\tlet size = _ref$size === undefined ? 0 : _ref$size;\n\tvar _ref$timeout = _ref.timeout;\n\tlet timeout = _ref$timeout === undefined ? 0 : _ref$timeout;\n\n\tif (body == null) {\n\t\t// body is undefined or null\n\t\tbody = null;\n\t} else if (isURLSearchParams(body)) {\n\t\t// body is a URLSearchParams\n\t\tbody = Buffer.from(body.toString());\n\t} else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') {\n\t\t// body is ArrayBuffer\n\t\tbody = Buffer.from(body);\n\t} else if (ArrayBuffer.isView(body)) {\n\t\t// body is ArrayBufferView\n\t\tbody = Buffer.from(body.buffer, body.byteOffset, body.byteLength);\n\t} else if (body instanceof Stream) ; else {\n\t\t// none of the above\n\t\t// coerce to string then buffer\n\t\tbody = Buffer.from(String(body));\n\t}\n\tthis[INTERNALS] = {\n\t\tbody,\n\t\tdisturbed: false,\n\t\terror: null\n\t};\n\tthis.size = size;\n\tthis.timeout = timeout;\n\n\tif (body instanceof Stream) {\n\t\tbody.on('error', function (err) {\n\t\t\tconst error = err.name === 'AbortError' ? 
err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err);\n\t\t\t_this[INTERNALS].error = error;\n\t\t});\n\t}\n}\n\nBody.prototype = {\n\tget body() {\n\t\treturn this[INTERNALS].body;\n\t},\n\n\tget bodyUsed() {\n\t\treturn this[INTERNALS].disturbed;\n\t},\n\n\t/**\n * Decode response as ArrayBuffer\n *\n * @return Promise\n */\n\tarrayBuffer() {\n\t\treturn consumeBody.call(this).then(function (buf) {\n\t\t\treturn buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n\t\t});\n\t},\n\n\t/**\n * Return raw response as Blob\n *\n * @return Promise\n */\n\tblob() {\n\t\tlet ct = this.headers && this.headers.get('content-type') || '';\n\t\treturn consumeBody.call(this).then(function (buf) {\n\t\t\treturn Object.assign(\n\t\t\t// Prevent copying\n\t\t\tnew Blob([], {\n\t\t\t\ttype: ct.toLowerCase()\n\t\t\t}), {\n\t\t\t\t[BUFFER]: buf\n\t\t\t});\n\t\t});\n\t},\n\n\t/**\n * Decode response as json\n *\n * @return Promise\n */\n\tjson() {\n\t\tvar _this2 = this;\n\n\t\treturn consumeBody.call(this).then(function (buffer) {\n\t\t\ttry {\n\t\t\t\treturn JSON.parse(buffer.toString());\n\t\t\t} catch (err) {\n\t\t\t\treturn Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json'));\n\t\t\t}\n\t\t});\n\t},\n\n\t/**\n * Decode response as text\n *\n * @return Promise\n */\n\ttext() {\n\t\treturn consumeBody.call(this).then(function (buffer) {\n\t\t\treturn buffer.toString();\n\t\t});\n\t},\n\n\t/**\n * Decode response as buffer (non-spec api)\n *\n * @return Promise\n */\n\tbuffer() {\n\t\treturn consumeBody.call(this);\n\t},\n\n\t/**\n * Decode response as text, while automatically detecting the encoding and\n * trying to decode to UTF-8 (non-spec api)\n *\n * @return Promise\n */\n\ttextConverted() {\n\t\tvar _this3 = this;\n\n\t\treturn consumeBody.call(this).then(function (buffer) {\n\t\t\treturn convertBody(buffer, _this3.headers);\n\t\t});\n\t}\n};\n\n// In browsers, all properties are enumerable.\nObject.defineProperties(Body.prototype, {\n\tbody: { enumerable: true },\n\tbodyUsed: { enumerable: true },\n\tarrayBuffer: { enumerable: true },\n\tblob: { enumerable: true },\n\tjson: { enumerable: true },\n\ttext: { enumerable: true }\n});\n\nBody.mixIn = function (proto) {\n\tfor (const name of Object.getOwnPropertyNames(Body.prototype)) {\n\t\t// istanbul ignore else: future proof\n\t\tif (!(name in proto)) {\n\t\t\tconst desc = Object.getOwnPropertyDescriptor(Body.prototype, name);\n\t\t\tObject.defineProperty(proto, name, desc);\n\t\t}\n\t}\n};\n\n/**\n * Consume and convert an entire Body to a Buffer.\n *\n * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body\n *\n * @return Promise\n */\nfunction consumeBody() {\n\tvar _this4 = this;\n\n\tif (this[INTERNALS].disturbed) {\n\t\treturn Body.Promise.reject(new TypeError(`body used already for: ${this.url}`));\n\t}\n\n\tthis[INTERNALS].disturbed = true;\n\n\tif (this[INTERNALS].error) {\n\t\treturn Body.Promise.reject(this[INTERNALS].error);\n\t}\n\n\tlet body = this.body;\n\n\t// body is null\n\tif (body === null) {\n\t\treturn Body.Promise.resolve(Buffer.alloc(0));\n\t}\n\n\t// body is blob\n\tif (isBlob(body)) {\n\t\tbody = body.stream();\n\t}\n\n\t// body is buffer\n\tif (Buffer.isBuffer(body)) {\n\t\treturn Body.Promise.resolve(body);\n\t}\n\n\t// istanbul ignore if: should never happen\n\tif (!(body instanceof Stream)) {\n\t\treturn Body.Promise.resolve(Buffer.alloc(0));\n\t}\n\n\t// body is 
stream\n\t// get ready to actually consume the body\n\tlet accum = [];\n\tlet accumBytes = 0;\n\tlet abort = false;\n\n\treturn new Body.Promise(function (resolve, reject) {\n\t\tlet resTimeout;\n\n\t\t// allow timeout on slow response body\n\t\tif (_this4.timeout) {\n\t\t\tresTimeout = setTimeout(function () {\n\t\t\t\tabort = true;\n\t\t\t\treject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout'));\n\t\t\t}, _this4.timeout);\n\t\t}\n\n\t\t// handle stream errors\n\t\tbody.on('error', function (err) {\n\t\t\tif (err.name === 'AbortError') {\n\t\t\t\t// if the request was aborted, reject with this Error\n\t\t\t\tabort = true;\n\t\t\t\treject(err);\n\t\t\t} else {\n\t\t\t\t// other errors, such as incorrect content-encoding\n\t\t\t\treject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err));\n\t\t\t}\n\t\t});\n\n\t\tbody.on('data', function (chunk) {\n\t\t\tif (abort || chunk === null) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tif (_this4.size && accumBytes + chunk.length > _this4.size) {\n\t\t\t\tabort = true;\n\t\t\t\treject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size'));\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\taccumBytes += chunk.length;\n\t\t\taccum.push(chunk);\n\t\t});\n\n\t\tbody.on('end', function () {\n\t\t\tif (abort) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tclearTimeout(resTimeout);\n\n\t\t\ttry {\n\t\t\t\tresolve(Buffer.concat(accum, accumBytes));\n\t\t\t} catch (err) {\n\t\t\t\t// handle streams that have accumulated too much data (issue #414)\n\t\t\t\treject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err));\n\t\t\t}\n\t\t});\n\t});\n}\n\n/**\n * Detect buffer encoding and convert to target encoding\n * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding\n *\n * @param Buffer buffer Incoming buffer\n * @param String encoding Target encoding\n * @return String\n */\nfunction convertBody(buffer, headers) {\n\tif (typeof convert !== 'function') {\n\t\tthrow new Error('The package `encoding` must be installed to use the textConverted() function');\n\t}\n\n\tconst ct = headers.get('content-type');\n\tlet charset = 'utf-8';\n\tlet res, str;\n\n\t// header\n\tif (ct) {\n\t\tres = /charset=([^;]*)/i.exec(ct);\n\t}\n\n\t// no charset in content type, peek at response body for at most 1024 bytes\n\tstr = buffer.slice(0, 1024).toString();\n\n\t// html5\n\tif (!res && str) {\n\t\tres = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined;\n\n\t\tthis[MAP] = Object.create(null);\n\n\t\tif (init instanceof Headers) {\n\t\t\tconst rawHeaders = init.raw();\n\t\t\tconst headerNames = Object.keys(rawHeaders);\n\n\t\t\tfor (const headerName of headerNames) {\n\t\t\t\tfor (const value of rawHeaders[headerName]) {\n\t\t\t\t\tthis.append(headerName, value);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn;\n\t\t}\n\n\t\t// We don't worry about converting prop to ByteString here as append()\n\t\t// will handle it.\n\t\tif (init == null) ; else if (typeof init === 'object') {\n\t\t\tconst method = init[Symbol.iterator];\n\t\t\tif (method != null) {\n\t\t\t\tif (typeof method !== 'function') {\n\t\t\t\t\tthrow new TypeError('Header pairs must be iterable');\n\t\t\t\t}\n\n\t\t\t\t// sequence>\n\t\t\t\t// Note: per spec we have to first exhaust the lists then process them\n\t\t\t\tconst pairs = [];\n\t\t\t\tfor (const pair of init) {\n\t\t\t\t\tif (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') {\n\t\t\t\t\t\tthrow new TypeError('Each header pair must be iterable');\n\t\t\t\t\t}\n\t\t\t\t\tpairs.push(Array.from(pair));\n\t\t\t\t}\n\n\t\t\t\tfor (const pair of pairs) {\n\t\t\t\t\tif (pair.length !== 2) {\n\t\t\t\t\t\tthrow new TypeError('Each header pair must be a name/value tuple');\n\t\t\t\t\t}\n\t\t\t\t\tthis.append(pair[0], pair[1]);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// record\n\t\t\t\tfor (const key of Object.keys(init)) {\n\t\t\t\t\tconst value = init[key];\n\t\t\t\t\tthis.append(key, value);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tthrow new TypeError('Provided initializer must be an object');\n\t\t}\n\t}\n\n\t/**\n * Return combined header value given name\n *\n * @param String name Header name\n * @return Mixed\n */\n\tget(name) {\n\t\tname = `${name}`;\n\t\tvalidateName(name);\n\t\tconst key = find(this[MAP], name);\n\t\tif (key === undefined) {\n\t\t\treturn null;\n\t\t}\n\n\t\treturn this[MAP][key].join(', ');\n\t}\n\n\t/**\n * Iterate over all headers\n *\n * @param Function callback Executed for each item with parameters (value, name, thisArg)\n * @param Boolean thisArg `this` context for callback function\n * @return Void\n */\n\tforEach(callback) {\n\t\tlet thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined;\n\n\t\tlet pairs = getHeaders(this);\n\t\tlet i = 0;\n\t\twhile (i < pairs.length) {\n\t\t\tvar _pairs$i = pairs[i];\n\t\t\tconst name = _pairs$i[0],\n\t\t\t value = _pairs$i[1];\n\n\t\t\tcallback.call(thisArg, value, name, this);\n\t\t\tpairs = getHeaders(this);\n\t\t\ti++;\n\t\t}\n\t}\n\n\t/**\n * Overwrite header values given name\n *\n * @param String name Header name\n * @param String value Header value\n * @return Void\n */\n\tset(name, value) {\n\t\tname = `${name}`;\n\t\tvalue = `${value}`;\n\t\tvalidateName(name);\n\t\tvalidateValue(value);\n\t\tconst key = find(this[MAP], name);\n\t\tthis[MAP][key !== undefined ? 
key : name] = [value];\n\t}\n\n\t/**\n * Append a value onto existing header\n *\n * @param String name Header name\n * @param String value Header value\n * @return Void\n */\n\tappend(name, value) {\n\t\tname = `${name}`;\n\t\tvalue = `${value}`;\n\t\tvalidateName(name);\n\t\tvalidateValue(value);\n\t\tconst key = find(this[MAP], name);\n\t\tif (key !== undefined) {\n\t\t\tthis[MAP][key].push(value);\n\t\t} else {\n\t\t\tthis[MAP][name] = [value];\n\t\t}\n\t}\n\n\t/**\n * Check for header name existence\n *\n * @param String name Header name\n * @return Boolean\n */\n\thas(name) {\n\t\tname = `${name}`;\n\t\tvalidateName(name);\n\t\treturn find(this[MAP], name) !== undefined;\n\t}\n\n\t/**\n * Delete all header values given name\n *\n * @param String name Header name\n * @return Void\n */\n\tdelete(name) {\n\t\tname = `${name}`;\n\t\tvalidateName(name);\n\t\tconst key = find(this[MAP], name);\n\t\tif (key !== undefined) {\n\t\t\tdelete this[MAP][key];\n\t\t}\n\t}\n\n\t/**\n * Return raw headers (non-spec api)\n *\n * @return Object\n */\n\traw() {\n\t\treturn this[MAP];\n\t}\n\n\t/**\n * Get an iterator on keys.\n *\n * @return Iterator\n */\n\tkeys() {\n\t\treturn createHeadersIterator(this, 'key');\n\t}\n\n\t/**\n * Get an iterator on values.\n *\n * @return Iterator\n */\n\tvalues() {\n\t\treturn createHeadersIterator(this, 'value');\n\t}\n\n\t/**\n * Get an iterator on entries.\n *\n * This is the default iterator of the Headers object.\n *\n * @return Iterator\n */\n\t[Symbol.iterator]() {\n\t\treturn createHeadersIterator(this, 'key+value');\n\t}\n}\nHeaders.prototype.entries = Headers.prototype[Symbol.iterator];\n\nObject.defineProperty(Headers.prototype, Symbol.toStringTag, {\n\tvalue: 'Headers',\n\twritable: false,\n\tenumerable: false,\n\tconfigurable: true\n});\n\nObject.defineProperties(Headers.prototype, {\n\tget: { enumerable: true },\n\tforEach: { enumerable: true },\n\tset: { enumerable: true },\n\tappend: { enumerable: true },\n\thas: { enumerable: true },\n\tdelete: { enumerable: true },\n\tkeys: { enumerable: true },\n\tvalues: { enumerable: true },\n\tentries: { enumerable: true }\n});\n\nfunction getHeaders(headers) {\n\tlet kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value';\n\n\tconst keys = Object.keys(headers[MAP]).sort();\n\treturn keys.map(kind === 'key' ? function (k) {\n\t\treturn k.toLowerCase();\n\t} : kind === 'value' ? 
function (k) {\n\t\treturn headers[MAP][k].join(', ');\n\t} : function (k) {\n\t\treturn [k.toLowerCase(), headers[MAP][k].join(', ')];\n\t});\n}\n\nconst INTERNAL = Symbol('internal');\n\nfunction createHeadersIterator(target, kind) {\n\tconst iterator = Object.create(HeadersIteratorPrototype);\n\titerator[INTERNAL] = {\n\t\ttarget,\n\t\tkind,\n\t\tindex: 0\n\t};\n\treturn iterator;\n}\n\nconst HeadersIteratorPrototype = Object.setPrototypeOf({\n\tnext() {\n\t\t// istanbul ignore if\n\t\tif (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) {\n\t\t\tthrow new TypeError('Value of `this` is not a HeadersIterator');\n\t\t}\n\n\t\tvar _INTERNAL = this[INTERNAL];\n\t\tconst target = _INTERNAL.target,\n\t\t kind = _INTERNAL.kind,\n\t\t index = _INTERNAL.index;\n\n\t\tconst values = getHeaders(target, kind);\n\t\tconst len = values.length;\n\t\tif (index >= len) {\n\t\t\treturn {\n\t\t\t\tvalue: undefined,\n\t\t\t\tdone: true\n\t\t\t};\n\t\t}\n\n\t\tthis[INTERNAL].index = index + 1;\n\n\t\treturn {\n\t\t\tvalue: values[index],\n\t\t\tdone: false\n\t\t};\n\t}\n}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())));\n\nObject.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, {\n\tvalue: 'HeadersIterator',\n\twritable: false,\n\tenumerable: false,\n\tconfigurable: true\n});\n\n/**\n * Export the Headers object in a form that Node.js can consume.\n *\n * @param Headers headers\n * @return Object\n */\nfunction exportNodeCompatibleHeaders(headers) {\n\tconst obj = Object.assign({ __proto__: null }, headers[MAP]);\n\n\t// http.request() only supports string as Host header. This hack makes\n\t// specifying custom Host header possible.\n\tconst hostHeaderKey = find(headers[MAP], 'Host');\n\tif (hostHeaderKey !== undefined) {\n\t\tobj[hostHeaderKey] = obj[hostHeaderKey][0];\n\t}\n\n\treturn obj;\n}\n\n/**\n * Create a Headers object from an object of headers, ignoring those that do\n * not conform to HTTP grammar productions.\n *\n * @param Object obj Object of headers\n * @return Headers\n */\nfunction createHeadersLenient(obj) {\n\tconst headers = new Headers();\n\tfor (const name of Object.keys(obj)) {\n\t\tif (invalidTokenRegex.test(name)) {\n\t\t\tcontinue;\n\t\t}\n\t\tif (Array.isArray(obj[name])) {\n\t\t\tfor (const val of obj[name]) {\n\t\t\t\tif (invalidHeaderCharRegex.test(val)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tif (headers[MAP][name] === undefined) {\n\t\t\t\t\theaders[MAP][name] = [val];\n\t\t\t\t} else {\n\t\t\t\t\theaders[MAP][name].push(val);\n\t\t\t\t}\n\t\t\t}\n\t\t} else if (!invalidHeaderCharRegex.test(obj[name])) {\n\t\t\theaders[MAP][name] = [obj[name]];\n\t\t}\n\t}\n\treturn headers;\n}\n\nconst INTERNALS$1 = Symbol('Response internals');\n\n// fix an issue where \"STATUS_CODES\" aren't a named export for node <10\nconst STATUS_CODES = http.STATUS_CODES;\n\n/**\n * Response class\n *\n * @param Stream body Readable stream\n * @param Object opts Response options\n * @return Void\n */\nclass Response {\n\tconstructor() {\n\t\tlet body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null;\n\t\tlet opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {};\n\n\t\tBody.call(this, body, opts);\n\n\t\tconst status = opts.status || 200;\n\t\tconst headers = new Headers(opts.headers);\n\n\t\tif (body != null && !headers.has('Content-Type')) {\n\t\t\tconst contentType = extractContentType(body);\n\t\t\tif (contentType) {\n\t\t\t\theaders.append('Content-Type', contentType);\n\t\t\t}\n\t\t}\n\n\t\tthis[INTERNALS$1] = {\n\t\t\turl: opts.url,\n\t\t\tstatus,\n\t\t\tstatusText: opts.statusText || STATUS_CODES[status],\n\t\t\theaders,\n\t\t\tcounter: opts.counter\n\t\t};\n\t}\n\n\tget url() {\n\t\treturn this[INTERNALS$1].url || '';\n\t}\n\n\tget status() {\n\t\treturn this[INTERNALS$1].status;\n\t}\n\n\t/**\n * Convenience property representing if the request ended normally\n */\n\tget ok() {\n\t\treturn this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300;\n\t}\n\n\tget redirected() {\n\t\treturn this[INTERNALS$1].counter > 0;\n\t}\n\n\tget statusText() {\n\t\treturn this[INTERNALS$1].statusText;\n\t}\n\n\tget headers() {\n\t\treturn this[INTERNALS$1].headers;\n\t}\n\n\t/**\n * Clone this response\n *\n * @return Response\n */\n\tclone() {\n\t\treturn new Response(clone(this), {\n\t\t\turl: this.url,\n\t\t\tstatus: this.status,\n\t\t\tstatusText: this.statusText,\n\t\t\theaders: this.headers,\n\t\t\tok: this.ok,\n\t\t\tredirected: this.redirected\n\t\t});\n\t}\n}\n\nBody.mixIn(Response.prototype);\n\nObject.defineProperties(Response.prototype, {\n\turl: { enumerable: true },\n\tstatus: { enumerable: true },\n\tok: { enumerable: true },\n\tredirected: { enumerable: true },\n\tstatusText: { enumerable: true },\n\theaders: { enumerable: true },\n\tclone: { enumerable: true }\n});\n\nObject.defineProperty(Response.prototype, Symbol.toStringTag, {\n\tvalue: 'Response',\n\twritable: false,\n\tenumerable: false,\n\tconfigurable: true\n});\n\nconst INTERNALS$2 = Symbol('Request internals');\n\n// fix an issue where \"format\", \"parse\" aren't a named export for node <10\nconst parse_url = Url.parse;\nconst format_url = Url.format;\n\nconst streamDestructionSupported = 'destroy' in Stream.Readable.prototype;\n\n/**\n * Check if a value is an instance of Request.\n *\n * @param Mixed input\n * @return Boolean\n */\nfunction isRequest(input) {\n\treturn typeof input === 'object' && typeof input[INTERNALS$2] === 'object';\n}\n\nfunction isAbortSignal(signal) {\n\tconst proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal);\n\treturn !!(proto && proto.constructor.name === 'AbortSignal');\n}\n\n/**\n * Request class\n *\n * @param Mixed input Url or Request instance\n * @param Object init Custom options\n * @return Void\n */\nclass Request {\n\tconstructor(input) {\n\t\tlet init = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {};\n\n\t\tlet parsedURL;\n\n\t\t// normalize input\n\t\tif (!isRequest(input)) {\n\t\t\tif (input && input.href) {\n\t\t\t\t// in order to support Node.js' Url objects; though WHATWG's URL objects\n\t\t\t\t// will fall into this branch also (since their `toString()` will return\n\t\t\t\t// `href` property anyway)\n\t\t\t\tparsedURL = parse_url(input.href);\n\t\t\t} else {\n\t\t\t\t// coerce input to a string before attempting to parse\n\t\t\t\tparsedURL = parse_url(`${input}`);\n\t\t\t}\n\t\t\tinput = {};\n\t\t} else {\n\t\t\tparsedURL = parse_url(input.url);\n\t\t}\n\n\t\tlet method = init.method || input.method || 'GET';\n\t\tmethod = method.toUpperCase();\n\n\t\tif ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) {\n\t\t\tthrow new TypeError('Request with GET/HEAD method cannot have body');\n\t\t}\n\n\t\tlet inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? clone(input) : null;\n\n\t\tBody.call(this, inputBody, {\n\t\t\ttimeout: init.timeout || input.timeout || 0,\n\t\t\tsize: init.size || input.size || 0\n\t\t});\n\n\t\tconst headers = new Headers(init.headers || input.headers || {});\n\n\t\tif (inputBody != null && !headers.has('Content-Type')) {\n\t\t\tconst contentType = extractContentType(inputBody);\n\t\t\tif (contentType) {\n\t\t\t\theaders.append('Content-Type', contentType);\n\t\t\t}\n\t\t}\n\n\t\tlet signal = isRequest(input) ? input.signal : null;\n\t\tif ('signal' in init) signal = init.signal;\n\n\t\tif (signal != null && !isAbortSignal(signal)) {\n\t\t\tthrow new TypeError('Expected signal to be an instanceof AbortSignal');\n\t\t}\n\n\t\tthis[INTERNALS$2] = {\n\t\t\tmethod,\n\t\t\tredirect: init.redirect || input.redirect || 'follow',\n\t\t\theaders,\n\t\t\tparsedURL,\n\t\t\tsignal\n\t\t};\n\n\t\t// node-fetch-only options\n\t\tthis.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20;\n\t\tthis.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? 
input.compress : true;\n\t\tthis.counter = init.counter || input.counter || 0;\n\t\tthis.agent = init.agent || input.agent;\n\t}\n\n\tget method() {\n\t\treturn this[INTERNALS$2].method;\n\t}\n\n\tget url() {\n\t\treturn format_url(this[INTERNALS$2].parsedURL);\n\t}\n\n\tget headers() {\n\t\treturn this[INTERNALS$2].headers;\n\t}\n\n\tget redirect() {\n\t\treturn this[INTERNALS$2].redirect;\n\t}\n\n\tget signal() {\n\t\treturn this[INTERNALS$2].signal;\n\t}\n\n\t/**\n * Clone this request\n *\n * @return Request\n */\n\tclone() {\n\t\treturn new Request(this);\n\t}\n}\n\nBody.mixIn(Request.prototype);\n\nObject.defineProperty(Request.prototype, Symbol.toStringTag, {\n\tvalue: 'Request',\n\twritable: false,\n\tenumerable: false,\n\tconfigurable: true\n});\n\nObject.defineProperties(Request.prototype, {\n\tmethod: { enumerable: true },\n\turl: { enumerable: true },\n\theaders: { enumerable: true },\n\tredirect: { enumerable: true },\n\tclone: { enumerable: true },\n\tsignal: { enumerable: true }\n});\n\n/**\n * Convert a Request to Node.js http request options.\n *\n * @param Request A Request instance\n * @return Object The options object to be passed to http.request\n */\nfunction getNodeRequestOptions(request) {\n\tconst parsedURL = request[INTERNALS$2].parsedURL;\n\tconst headers = new Headers(request[INTERNALS$2].headers);\n\n\t// fetch step 1.3\n\tif (!headers.has('Accept')) {\n\t\theaders.set('Accept', '*/*');\n\t}\n\n\t// Basic fetch\n\tif (!parsedURL.protocol || !parsedURL.hostname) {\n\t\tthrow new TypeError('Only absolute URLs are supported');\n\t}\n\n\tif (!/^https?:$/.test(parsedURL.protocol)) {\n\t\tthrow new TypeError('Only HTTP(S) protocols are supported');\n\t}\n\n\tif (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) {\n\t\tthrow new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8');\n\t}\n\n\t// HTTP-network-or-cache fetch steps 2.4-2.7\n\tlet contentLengthValue = null;\n\tif (request.body == null && /^(POST|PUT)$/i.test(request.method)) {\n\t\tcontentLengthValue = '0';\n\t}\n\tif (request.body != null) {\n\t\tconst totalBytes = getTotalBytes(request);\n\t\tif (typeof totalBytes === 'number') {\n\t\t\tcontentLengthValue = String(totalBytes);\n\t\t}\n\t}\n\tif (contentLengthValue) {\n\t\theaders.set('Content-Length', contentLengthValue);\n\t}\n\n\t// HTTP-network-or-cache fetch step 2.11\n\tif (!headers.has('User-Agent')) {\n\t\theaders.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)');\n\t}\n\n\t// HTTP-network-or-cache fetch step 2.15\n\tif (request.compress && !headers.has('Accept-Encoding')) {\n\t\theaders.set('Accept-Encoding', 'gzip,deflate');\n\t}\n\n\tlet agent = request.agent;\n\tif (typeof agent === 'function') {\n\t\tagent = agent(parsedURL);\n\t}\n\n\tif (!headers.has('Connection') && !agent) {\n\t\theaders.set('Connection', 'close');\n\t}\n\n\t// HTTP-network fetch step 4.2\n\t// chunked encoding is handled by Node.js\n\n\treturn Object.assign({}, parsedURL, {\n\t\tmethod: request.method,\n\t\theaders: exportNodeCompatibleHeaders(headers),\n\t\tagent\n\t});\n}\n\n/**\n * abort-error.js\n *\n * AbortError interface for cancelled requests\n */\n\n/**\n * Create AbortError instance\n *\n * @param String message Error message for human\n * @return AbortError\n */\nfunction AbortError(message) {\n Error.call(this, message);\n\n this.type = 'aborted';\n this.message = message;\n\n // hide custom error implementation details from end-users\n 
Error.captureStackTrace(this, this.constructor);\n}\n\nAbortError.prototype = Object.create(Error.prototype);\nAbortError.prototype.constructor = AbortError;\nAbortError.prototype.name = 'AbortError';\n\n// fix an issue where \"PassThrough\", \"resolve\" aren't a named export for node <10\nconst PassThrough$1 = Stream.PassThrough;\nconst resolve_url = Url.resolve;\n\n/**\n * Fetch function\n *\n * @param Mixed url Absolute url or Request instance\n * @param Object opts Fetch options\n * @return Promise\n */\nfunction fetch(url, opts) {\n\n\t// allow custom promise\n\tif (!fetch.Promise) {\n\t\tthrow new Error('native promise missing, set fetch.Promise to your favorite alternative');\n\t}\n\n\tBody.Promise = fetch.Promise;\n\n\t// wrap http.request into fetch\n\treturn new fetch.Promise(function (resolve, reject) {\n\t\t// build request object\n\t\tconst request = new Request(url, opts);\n\t\tconst options = getNodeRequestOptions(request);\n\n\t\tconst send = (options.protocol === 'https:' ? https : http).request;\n\t\tconst signal = request.signal;\n\n\t\tlet response = null;\n\n\t\tconst abort = function abort() {\n\t\t\tlet error = new AbortError('The user aborted a request.');\n\t\t\treject(error);\n\t\t\tif (request.body && request.body instanceof Stream.Readable) {\n\t\t\t\trequest.body.destroy(error);\n\t\t\t}\n\t\t\tif (!response || !response.body) return;\n\t\t\tresponse.body.emit('error', error);\n\t\t};\n\n\t\tif (signal && signal.aborted) {\n\t\t\tabort();\n\t\t\treturn;\n\t\t}\n\n\t\tconst abortAndFinalize = function abortAndFinalize() {\n\t\t\tabort();\n\t\t\tfinalize();\n\t\t};\n\n\t\t// send request\n\t\tconst req = send(options);\n\t\tlet reqTimeout;\n\n\t\tif (signal) {\n\t\t\tsignal.addEventListener('abort', abortAndFinalize);\n\t\t}\n\n\t\tfunction finalize() {\n\t\t\treq.abort();\n\t\t\tif (signal) signal.removeEventListener('abort', abortAndFinalize);\n\t\t\tclearTimeout(reqTimeout);\n\t\t}\n\n\t\tif (request.timeout) {\n\t\t\treq.once('socket', function (socket) {\n\t\t\t\treqTimeout = setTimeout(function () {\n\t\t\t\t\treject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout'));\n\t\t\t\t\tfinalize();\n\t\t\t\t}, request.timeout);\n\t\t\t});\n\t\t}\n\n\t\treq.on('error', function (err) {\n\t\t\treject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err));\n\t\t\tfinalize();\n\t\t});\n\n\t\treq.on('response', function (res) {\n\t\t\tclearTimeout(reqTimeout);\n\n\t\t\tconst headers = createHeadersLenient(res.headers);\n\n\t\t\t// HTTP fetch step 5\n\t\t\tif (fetch.isRedirect(res.statusCode)) {\n\t\t\t\t// HTTP fetch step 5.2\n\t\t\t\tconst location = headers.get('Location');\n\n\t\t\t\t// HTTP fetch step 5.3\n\t\t\t\tconst locationURL = location === null ? 
null : resolve_url(request.url, location);\n\n\t\t\t\t// HTTP fetch step 5.5\n\t\t\t\tswitch (request.redirect) {\n\t\t\t\t\tcase 'error':\n\t\t\t\t\t\treject(new FetchError(`uri requested responds with a redirect, redirect mode is set to error: ${request.url}`, 'no-redirect'));\n\t\t\t\t\t\tfinalize();\n\t\t\t\t\t\treturn;\n\t\t\t\t\tcase 'manual':\n\t\t\t\t\t\t// node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL.\n\t\t\t\t\t\tif (locationURL !== null) {\n\t\t\t\t\t\t\t// handle corrupted header\n\t\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\t\theaders.set('Location', locationURL);\n\t\t\t\t\t\t\t} catch (err) {\n\t\t\t\t\t\t\t\t// istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request\n\t\t\t\t\t\t\t\treject(err);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 'follow':\n\t\t\t\t\t\t// HTTP-redirect fetch step 2\n\t\t\t\t\t\tif (locationURL === null) {\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// HTTP-redirect fetch step 5\n\t\t\t\t\t\tif (request.counter >= request.follow) {\n\t\t\t\t\t\t\treject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect'));\n\t\t\t\t\t\t\tfinalize();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// HTTP-redirect fetch step 6 (counter increment)\n\t\t\t\t\t\t// Create a new Request object.\n\t\t\t\t\t\tconst requestOpts = {\n\t\t\t\t\t\t\theaders: new Headers(request.headers),\n\t\t\t\t\t\t\tfollow: request.follow,\n\t\t\t\t\t\t\tcounter: request.counter + 1,\n\t\t\t\t\t\t\tagent: request.agent,\n\t\t\t\t\t\t\tcompress: request.compress,\n\t\t\t\t\t\t\tmethod: request.method,\n\t\t\t\t\t\t\tbody: request.body,\n\t\t\t\t\t\t\tsignal: request.signal,\n\t\t\t\t\t\t\ttimeout: request.timeout,\n\t\t\t\t\t\t\tsize: request.size\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\t// HTTP-redirect fetch step 9\n\t\t\t\t\t\tif (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) {\n\t\t\t\t\t\t\treject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect'));\n\t\t\t\t\t\t\tfinalize();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// HTTP-redirect fetch step 11\n\t\t\t\t\t\tif (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') {\n\t\t\t\t\t\t\trequestOpts.method = 'GET';\n\t\t\t\t\t\t\trequestOpts.body = undefined;\n\t\t\t\t\t\t\trequestOpts.headers.delete('content-length');\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// HTTP-redirect fetch step 15\n\t\t\t\t\t\tresolve(fetch(new Request(locationURL, requestOpts)));\n\t\t\t\t\t\tfinalize();\n\t\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// prepare response\n\t\t\tres.once('end', function () {\n\t\t\t\tif (signal) signal.removeEventListener('abort', abortAndFinalize);\n\t\t\t});\n\t\t\tlet body = res.pipe(new PassThrough$1());\n\n\t\t\tconst response_options = {\n\t\t\t\turl: request.url,\n\t\t\t\tstatus: res.statusCode,\n\t\t\t\tstatusText: res.statusMessage,\n\t\t\t\theaders: headers,\n\t\t\t\tsize: request.size,\n\t\t\t\ttimeout: request.timeout,\n\t\t\t\tcounter: request.counter\n\t\t\t};\n\n\t\t\t// HTTP-network fetch step 12.1.1.3\n\t\t\tconst codings = headers.get('Content-Encoding');\n\n\t\t\t// HTTP-network fetch step 12.1.1.4: handle content codings\n\n\t\t\t// in following scenarios we ignore compression support\n\t\t\t// 1. compression support is disabled\n\t\t\t// 2. HEAD request\n\t\t\t// 3. no Content-Encoding header\n\t\t\t// 4. 
no content response (204)\n\t\t\t// 5. content not modified response (304)\n\t\t\tif (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) {\n\t\t\t\tresponse = new Response(body, response_options);\n\t\t\t\tresolve(response);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// For Node v6+\n\t\t\t// Be less strict when decoding compressed responses, since sometimes\n\t\t\t// servers send slightly invalid responses that are still accepted\n\t\t\t// by common browsers.\n\t\t\t// Always using Z_SYNC_FLUSH is what cURL does.\n\t\t\tconst zlibOptions = {\n\t\t\t\tflush: zlib.Z_SYNC_FLUSH,\n\t\t\t\tfinishFlush: zlib.Z_SYNC_FLUSH\n\t\t\t};\n\n\t\t\t// for gzip\n\t\t\tif (codings == 'gzip' || codings == 'x-gzip') {\n\t\t\t\tbody = body.pipe(zlib.createGunzip(zlibOptions));\n\t\t\t\tresponse = new Response(body, response_options);\n\t\t\t\tresolve(response);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// for deflate\n\t\t\tif (codings == 'deflate' || codings == 'x-deflate') {\n\t\t\t\t// handle the infamous raw deflate response from old servers\n\t\t\t\t// a hack for old IIS and Apache servers\n\t\t\t\tconst raw = res.pipe(new PassThrough$1());\n\t\t\t\traw.once('data', function (chunk) {\n\t\t\t\t\t// see http://stackoverflow.com/questions/37519828\n\t\t\t\t\tif ((chunk[0] & 0x0F) === 0x08) {\n\t\t\t\t\t\tbody = body.pipe(zlib.createInflate());\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbody = body.pipe(zlib.createInflateRaw());\n\t\t\t\t\t}\n\t\t\t\t\tresponse = new Response(body, response_options);\n\t\t\t\t\tresolve(response);\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// for br\n\t\t\tif (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') {\n\t\t\t\tbody = body.pipe(zlib.createBrotliDecompress());\n\t\t\t\tresponse = new Response(body, response_options);\n\t\t\t\tresolve(response);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// otherwise, use response as-is\n\t\t\tresponse = new Response(body, response_options);\n\t\t\tresolve(response);\n\t\t});\n\n\t\twriteToStream(req, request);\n\t});\n}\n/**\n * Redirect code matching\n *\n * @param Number code Status code\n * @return Boolean\n */\nfetch.isRedirect = function (code) {\n\treturn code === 301 || code === 302 || code === 303 || code === 307 || code === 308;\n};\n\n// expose Promise\nfetch.Promise = global.Promise;\n\nexport default fetch;\nexport { Headers, Request, Response, FetchError };\n", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n/** Convenient class for storing tensor-related data. 
*/\nexport class DataStorage {\n constructor(backend, dataMover) {\n this.backend = backend;\n this.dataMover = dataMover;\n this.data = new WeakMap();\n this.dataIdsCount = 0;\n }\n get(dataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n set(dataId, value) {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n has(dataId) {\n return this.data.has(dataId);\n }\n delete(dataId) {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n numDataIds() {\n return this.dataIdsCount;\n }\n}\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend {\n time(f) {\n return notYetImplemented('time');\n }\n read(dataId) {\n return notYetImplemented('read');\n }\n readSync(dataId) {\n return notYetImplemented('readSync');\n }\n numDataIds() {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId) {\n return notYetImplemented('disposeData');\n }\n write(values, shape, dtype) {\n return notYetImplemented('write');\n }\n move(dataId, values, shape, dtype) {\n return notYetImplemented('move');\n }\n memory() {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision() {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon() {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n batchMatMul(a, b, transposeA, transposeB) {\n return notYetImplemented('batchMatMul');\n }\n fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedBatchMatMul');\n }\n slice(x, begin, size) {\n return notYetImplemented('slice');\n }\n stridedSlice(x, begin, end, strides) {\n return notYetImplemented('stridedSlice');\n }\n unstack(x, axis) {\n return notYetImplemented('unstack');\n }\n reverse(a, axis) {\n return notYetImplemented('reverse');\n }\n concat(tensors, axis) {\n return notYetImplemented('concat');\n }\n neg(a) {\n return notYetImplemented('neg');\n }\n add(a, b) {\n return notYetImplemented('add');\n }\n addN(tensors) {\n return notYetImplemented('addN');\n }\n subtract(a, b) {\n return notYetImplemented('subtract');\n }\n multiply(a, b) {\n return notYetImplemented('multiply');\n }\n realDivide(a, b) {\n return notYetImplemented('realDivide');\n }\n floorDiv(a, b) {\n return notYetImplemented('floorDiv');\n }\n sum(x, axes) {\n return notYetImplemented('sum');\n }\n prod(x, axes) {\n return notYetImplemented('prod');\n }\n unsortedSegmentSum(x, segmentIds, numSegments) {\n return notYetImplemented('unsortedSegmentSum');\n }\n argMin(x, axis) {\n return notYetImplemented('argMin');\n }\n argMax(x, axis) {\n return notYetImplemented('argMax');\n }\n equal(a, b) {\n return notYetImplemented('equal');\n }\n notEqual(a, b) {\n return notYetImplemented('notEqual');\n }\n less(a, b) {\n return notYetImplemented('less');\n }\n lessEqual(a, b) {\n return notYetImplemented('lessEqual');\n }\n greater(a, b) {\n return notYetImplemented('greater');\n }\n greaterEqual(a, b) {\n return notYetImplemented('greaterEqual');\n }\n logicalNot(a) {\n return notYetImplemented('logicalNot');\n }\n logicalAnd(a, b) {\n return notYetImplemented('logicalAnd');\n }\n logicalOr(a, b) {\n 
return notYetImplemented('logicalOr');\n }\n where(condition) {\n return notYetImplemented('where');\n }\n select(condition, a, b) {\n return notYetImplemented('select');\n }\n topk(x, k, sorted) {\n return notYetImplemented('topk');\n }\n min(x, axes) {\n return notYetImplemented('min');\n }\n minimum(a, b) {\n return notYetImplemented('minimum');\n }\n mod(a, b) {\n return notYetImplemented('mod');\n }\n max(x, axes) {\n return notYetImplemented('max');\n }\n maximum(a, b) {\n return notYetImplemented('maximum');\n }\n all(x, axes) {\n return notYetImplemented('all');\n }\n any(x, axes) {\n return notYetImplemented('any');\n }\n squaredDifference(a, b) {\n return notYetImplemented('squaredDifference');\n }\n ceil(x) {\n return notYetImplemented('ceil');\n }\n floor(x) {\n return notYetImplemented('floor');\n }\n round(x) {\n return notYetImplemented('round');\n }\n sign(x) {\n return notYetImplemented('sign');\n }\n isNaN(x) {\n return notYetImplemented('isNaN');\n }\n isInf(x) {\n return notYetImplemented('isInf');\n }\n isFinite(x) {\n return notYetImplemented('isFinite');\n }\n pow(a, b) {\n return notYetImplemented('pow');\n }\n exp(x) {\n return notYetImplemented('exp');\n }\n expm1(x) {\n return notYetImplemented('expm1');\n }\n softmax(x, dim) {\n return notYetImplemented('softmax');\n }\n log(x) {\n return notYetImplemented('log');\n }\n log1p(x) {\n return notYetImplemented('log1p');\n }\n sqrt(x) {\n return notYetImplemented('sqrt');\n }\n rsqrt(x) {\n return notYetImplemented('rsqrt');\n }\n square(x) {\n return notYetImplemented('square');\n }\n reciprocal(x) {\n return notYetImplemented('reciprocal');\n }\n relu(x) {\n return notYetImplemented('relu');\n }\n relu6(x) {\n return notYetImplemented('relu6');\n }\n prelu(x, a) {\n return notYetImplemented('prelu');\n }\n elu(x) {\n return notYetImplemented('elu');\n }\n eluDer(dy, y) {\n return notYetImplemented('eluDer');\n }\n selu(x) {\n return notYetImplemented('selu');\n }\n int(x) {\n return notYetImplemented('int');\n }\n clip(x, min, max) {\n return notYetImplemented('clip');\n }\n abs(x) {\n return notYetImplemented('abs');\n }\n complexAbs(x) {\n return notYetImplemented('complexAbs');\n }\n sigmoid(x) {\n return notYetImplemented('sigmoid');\n }\n softplus(x) {\n return notYetImplemented('softplus');\n }\n sin(x) {\n return notYetImplemented('sin');\n }\n cos(x) {\n return notYetImplemented('cos');\n }\n tan(x) {\n return notYetImplemented('tan');\n }\n asin(x) {\n return notYetImplemented('asin');\n }\n acos(x) {\n return notYetImplemented('acos');\n }\n atan(x) {\n return notYetImplemented('atan');\n }\n atan2(a, b) {\n return notYetImplemented('atan2');\n }\n sinh(x) {\n return notYetImplemented('sinh');\n }\n cosh(x) {\n return notYetImplemented('cosh');\n }\n tanh(x) {\n return notYetImplemented('tanh');\n }\n asinh(x) {\n return notYetImplemented('asinh');\n }\n acosh(x) {\n return notYetImplemented('acosh');\n }\n atanh(x) {\n return notYetImplemented('atanh');\n }\n erf(x) {\n return notYetImplemented('erf');\n }\n step(x, alpha) {\n return notYetImplemented('step');\n }\n fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedConv2d');\n }\n conv2d(x, filter, convInfo) {\n return notYetImplemented('conv2d');\n }\n conv2dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv2dDerInput');\n }\n conv2dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv2dDerFilter');\n }\n fusedDepthwiseConv2D({ input, filter, convInfo, bias, 
activation, preluActivationWeights }) {\n return notYetImplemented('fusedDepthwiseConv2D');\n }\n depthwiseConv2D(input, filter, convInfo) {\n return notYetImplemented('depthwiseConv2D');\n }\n depthwiseConv2DDerInput(dy, filter, convInfo) {\n return notYetImplemented('depthwiseConv2DDerInput');\n }\n depthwiseConv2DDerFilter(x, dY, convInfo) {\n return notYetImplemented('depthwiseConv2DDerFilter');\n }\n conv3d(x, filter, convInfo) {\n return notYetImplemented('conv3d');\n }\n conv3dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv3dDerInput');\n }\n conv3dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv3dDerFilter');\n }\n maxPool(x, convInfo) {\n return notYetImplemented('maxPool');\n }\n maxPoolBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPoolBackprop');\n }\n avgPool(x, convInfo) {\n return notYetImplemented('avgPool');\n }\n avgPoolBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPoolBackprop');\n }\n avgPool3d(x, convInfo) {\n return notYetImplemented('avgPool3d');\n }\n avgPool3dBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPool3dBackprop');\n }\n maxPool3d(x, convInfo) {\n return notYetImplemented('maxPool3d');\n }\n maxPool3dBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPool3dBackprop');\n }\n reshape(x, shape) {\n return notYetImplemented('reshape');\n }\n cast(x, dtype) {\n return notYetImplemented('cast');\n }\n tile(x, reps) {\n return notYetImplemented('tile');\n }\n pad(x, paddings, constantValue) {\n return notYetImplemented('pad');\n }\n transpose(x, perm) {\n return notYetImplemented('transpose');\n }\n gather(x, indices, axis) {\n return notYetImplemented('gather');\n }\n gatherND(x, indices) {\n return notYetImplemented('gatherND');\n }\n scatterND(indices, updates, shape) {\n return notYetImplemented('scatterND');\n }\n batchToSpaceND(x, blockShape, crops) {\n return notYetImplemented('batchToSpaceND');\n }\n spaceToBatchND(x, blockShape, paddings) {\n return notYetImplemented('spaceToBatchND');\n }\n resizeBilinear(x, newHeight, newWidth, alignCorners) {\n return notYetImplemented('resizeBilinear');\n }\n resizeBilinearBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeBilinearBackprop');\n }\n resizeNearestNeighbor(x, newHEight, newWidth, alignCorners) {\n return notYetImplemented('resizeNearestNeighbor');\n }\n resizeNearestNeighborBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeNearestNeighborBackprop');\n }\n batchNorm(x, mean, variance, offset, scale, varianceEpsilon) {\n return notYetImplemented('batchNorm');\n }\n localResponseNormalization4D(x, radius, bias, alpha, beta) {\n return notYetImplemented('localResponseNormalization4D');\n }\n LRNGrad(dy, inputImage, outputImage, radius, bias, alpha, beta) {\n return notYetImplemented('LRNGrad');\n }\n multinomial(logits, normalized, numSamples, seed) {\n return notYetImplemented('multinomial');\n }\n oneHot(indices, depth, onValue, offValue) {\n return notYetImplemented('oneHot');\n }\n cumsum(x, axis, exclusive, reverse) {\n return notYetImplemented('cumsum');\n }\n nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {\n return notYetImplemented('nonMaxSuppression');\n }\n fft(x) {\n return notYetImplemented('fft');\n }\n ifft(x) {\n return notYetImplemented('ifft');\n }\n complex(real, imag) {\n return notYetImplemented('complex');\n }\n real(input) {\n return notYetImplemented('real');\n }\n imag(input) {\n return notYetImplemented('imag');\n }\n cropAndResize(image, 
boxes, boxIndex, cropSize, method, extrapolationValue) {\n return notYetImplemented('cropAndResize');\n }\n depthToSpace(x, blockSize, dataFormat) {\n return notYetImplemented('depthToSpace');\n }\n // Aligns with the \"SplitV\" kernel in TensorFlow.\n split(value, sizeSplits, axis) {\n return notYetImplemented('split');\n }\n sparseToDense(sparseIndices, sparseValues, outputShape, defaultValue) {\n return notYetImplemented('sparseToDense');\n }\n diag(x) {\n return notYetImplemented('diag');\n }\n fill(shape, value, dtype) {\n return notYetImplemented('fill');\n }\n onesLike(x) {\n return notYetImplemented('onesLike');\n }\n zerosLike(x) {\n return notYetImplemented('zerosLike');\n }\n linspace(start, stop, num) {\n return notYetImplemented('linspace');\n }\n dispose() {\n return notYetImplemented('dispose');\n }\n}\nfunction notYetImplemented(kernelName) {\n throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n//# sourceMappingURL=backend.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n // tslint:disable-next-line: no-any\n constructor(global) {\n this.global = global;\n this.flags = {};\n this.flagRegistry = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n setPlatform(platformName, platform) {\n if (this.platform != null) {\n console.warn(`Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n registerFlag(flagName, evaluationFn, setHook) {\n this.flagRegistry[flagName] = { evaluationFn, setHook };\n // Override the flag value from the URL. This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n async getAsync(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n get(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n const flagValue = this.evaluateFlag(flagName);\n if (flagValue instanceof Promise) {\n throw new Error(`Flag ${flagName} cannot be synchronously evaluated. 
` +\n `Please use getAsync() instead.`);\n }\n this.flags[flagName] = flagValue;\n return this.flags[flagName];\n }\n getNumber(flagName) {\n return this.get(flagName);\n }\n getBool(flagName) {\n return this.get(flagName);\n }\n getFlags() {\n return this.flags;\n }\n // For backwards compatibility.\n get features() {\n return this.flags;\n }\n set(flagName, value) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n evaluateFlag(flagName) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n setFlags(flags) {\n this.flags = Object.assign({}, flags);\n }\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n populateURLFlags() {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n const urlParams = getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':');\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\nexport function getQueryParams(queryString) {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\nfunction decodeParam(params, name, value) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\nfunction parseValue(flagName, value) {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n }\n else if (`${+value}` === value) {\n return +value;\n }\n throw new Error(`Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\nexport let ENV = null;\nexport function setEnvironmentGlobal(environment) {\n ENV = environment;\n}\n//# sourceMappingURL=environment.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace;\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace() {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns;\n if (typeof (window) !== 'undefined') {\n ns = window;\n }\n else if (typeof (global) !== 'undefined') {\n ns = global;\n }\n else if (typeof (process) !== 'undefined') {\n ns = process;\n }\n else if (typeof (self) !== 'undefined') {\n ns = self;\n }\n else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n// tslint:disable-next-line:no-any\nfunction getGlobalMap() {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key, init) {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n }\n else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n//# sourceMappingURL=global_util.js.map", "export const Abs = 'Abs';\nexport const Acos = 'Acos';\nexport const Acosh = 'Acosh';\nexport const Add = 'Add';\nexport const AddN = 'AddN';\nexport const All = 'All';\nexport const Any = 'Any';\nexport const ArgMax = 'ArgMax';\nexport const ArgMin = 'ArgMin';\nexport const Asin = 'Asin';\nexport const Asinh = 'Asinh';\nexport const Atan = 'Atan';\nexport const Atanh = 'Atanh';\nexport const Atan2 = 'Atan2';\nexport const AvgPool = 'AvgPool';\nexport const AvgPoolBackprop = 'AvgPoolBackprop';\nexport const AvgPool3D = 'AvgPool3D';\nexport const AvgPool3DBackprop = 'AvgPool3DBackprop';\nexport const BatchMatMul = 'BatchMatMul';\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport const BroadcastTo = 'BroadcastTo';\nexport const Cast = 'Cast';\nexport const Ceil = 'Ceil';\nexport const ClipByValue = 'ClipByValue';\nexport const Complex = 'Complex';\nexport const Concat = 'Concat';\nexport const Conv2D = 'Conv2D';\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport const Conv3D = 'Conv3D';\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport const Cos = 'Cos';\nexport const Cosh = 'Cosh';\nexport const Cumsum = 'Cumsum';\nexport const CropAndResize = 'CropAndResize';\nexport const DepthToSpace = 'DepthToSpace';\nexport const DepthwiseConv2dNative = 
'DepthwiseConv2dNative';\nexport const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';\nexport const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';\nexport const Diag = 'Diag';\nexport const Dilation2D = 'Dilation2D';\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport const Div = 'Div';\nexport const Elu = 'Elu';\nexport const EluGrad = 'EluGrad';\nexport const Erf = 'Erf';\nexport const Equal = 'Equal';\nexport const Exp = 'Exp';\nexport const Expm1 = 'Expm1';\nexport const FFT = 'FFT';\nexport const Fill = 'Fill';\nexport const FlipLeftRight = 'FlipLeftRight';\nexport const Floor = 'Floor';\nexport const FloorDiv = 'FloorDiv';\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport const GatherV2 = 'GatherV2';\nexport const GatherNd = 'GatherNd';\nexport const Greater = 'Greater';\nexport const GreaterEqual = 'GreaterEqual';\nexport const Identity = 'Identity';\nexport const IFFT = 'IFFT';\nexport const Imag = 'Imag';\nexport const IsFinite = 'IsFinite';\nexport const IsInf = 'IsInf';\nexport const IsNan = 'IsNan';\nexport const Less = 'Less';\nexport const LessEqual = 'LessEqual';\nexport const LinSpace = 'LinSpace';\nexport const Log = 'Log';\nexport const Log1p = 'Log1p';\nexport const LogicalAnd = 'LogicalAnd';\nexport const LogicalNot = 'LogicalNot';\nexport const LogicalOr = 'LogicalOr';\nexport const LogSoftmax = 'LogSoftmax';\nexport const LRN = 'LRN';\nexport const LRNBackprop = 'LRNBackprop';\nexport const Max = 'Max';\nexport const Maximum = 'Maximum';\nexport const MaxPool = 'MaxPool';\nexport const MaxPoolBackprop = 'MaxPoolBackprop';\nexport const MaxPool3D = 'MaxPool3D';\nexport const MaxPool3DBackprop = 'MaxPool3DBackprop';\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport const Mean = 'Mean';\nexport const Min = 'Min';\nexport const Minimum = 'Minimum';\nexport const Mod = 'Mod';\nexport const Multiply = 'Multiply';\nexport const Negate = 'Negate';\nexport const NotEqual = 'NotEqual';\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport const OnesLike = 'OnesLike';\nexport const OneHot = 'OneHot';\nexport const PadV2 = 'PadV2';\nexport const Pool = 'Pool';\nexport const Pow = 'Pow';\nexport const Prelu = 'Prelu';\nexport const Prod = 'Prod';\nexport const Range = 'Range';\nexport const Real = 'Real';\nexport const Reciprocal = 'Reciprocal';\nexport const Relu = 'Relu';\nexport const Reshape = 'Reshape';\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport const ResizeBilinear = 'ResizeBilinear';\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport const Relu6 = 'Relu6';\nexport const Reverse = 'Reverse';\nexport const Round = 'Round';\nexport const Rsqrt = 'Rsqrt';\nexport const ScatterNd = 'ScatterNd';\nexport const SelectV2 = 'SelectV2';\nexport const Selu = 'Selu';\nexport const Slice = 'Slice';\nexport const Sin = 'Sin';\nexport const Sinh = 'Sinh';\nexport const Sign = 'Sign';\nexport const Sigmoid = 'Sigmoid';\nexport const Softplus = 'Softplus';\nexport const Sqrt = 'Sqrt';\nexport const Sum = 'Sum';\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport const SplitV = 'SplitV';\nexport const Softmax = 'Softmax';\nexport const SquaredDifference = 
'SquaredDifference';\nexport const Square = 'Square';\nexport const Sub = 'Sub';\nexport const SparseToDense = 'SparseToDense';\nexport const StridedSlice = 'StridedSlice';\nexport const Tan = 'Tan';\nexport const Tanh = 'Tanh';\nexport const Tile = 'Tile';\nexport const TopK = 'TopK';\nexport const Transpose = 'Transpose';\nexport const Unique = 'Unique';\nexport const Unpack = 'Unpack';\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport const ZerosLike = 'ZerosLike';\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport const FromPixels = 'FromPixels';\nexport const RotateWithOffset = 'RotateWithOffset';\nexport const _FusedMatMul = '_FusedMatMul';\nexport const FusedConv2D = 'FusedConv2D';\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\n//# sourceMappingURL=kernel_names.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport { getGlobal } from './global_util';\nconst kernelRegistry = getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry = getGlobal('gradRegistry', () => new Map());\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName) {\n return gradRegistry.get(kernelName);\n}\nexport function getKernelsForBackend(backendName) {\n const it = kernelRegistry.entries();\n const result = [];\n while (true) {\n const { done, value } = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend,] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. 
Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config) {\n const { kernelName, backendName } = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n console.warn(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config) {\n const { kernelName } = config;\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n console.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n/** Removes the registered gradient from the global registry. */\nexport function unregisterGradient(kernelName) {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(`The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(registeredBackendName, newBackendName) {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName });\n registerKernel(newKernelConfig);\n });\n}\nfunction makeKey(kernelName, backendName) {\n return `${backendName}_${kernelName}`;\n}\n//# sourceMappingURL=kernel_registry.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array) {\n let counter = array.length;\n let temp = 0;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n temp = array[counter];\n array[counter] = array[index];\n array[index] = temp;\n }\n}\n/** Clamps a value to a specified range. */\nexport function clamp(min, x, max) {\n return Math.max(min, Math.min(x, max));\n}\nexport function nearestLargerEven(val) {\n return val % 2 === 0 ? val : val + 1;\n}\nexport function sum(arr) {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a, b) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a, b) {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr, msg) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? 
msg : msg());\n }\n}\nexport function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {\n assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\nexport function assertNonNull(a) {\n assert(a != null, () => `The input to the tensor constructor must be a non-null value.`);\n}\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function flatten(arr, result = [], skipTypedArray = false) {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n }\n else {\n result.push(arr);\n }\n return result;\n}\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape) {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\nexport function isScalarShape(shape) {\n return shape.length === 0;\n}\nexport function arraysEqual(n1, n2) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\nexport function isInt(a) {\n return a % 1 === 0;\n}\nexport function tanh(x) {\n // tslint:disable-next-line:no-any\n if (Math.tanh != null) {\n // tslint:disable-next-line:no-any\n return Math.tanh(x);\n }\n if (x === Infinity) {\n return 1;\n }\n else if (x === -Infinity) {\n return -1;\n }\n else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\nexport function sizeToSquarishShape(size) {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n) {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\nexport function rightPad(a, size) {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\nexport function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n tryCount++;\n const nextBackoff = 
delayFn(tryCount);\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n tryFn();\n });\n}\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(shape, size) {\n let shapeProd = 1;\n let implicitIdx = -1;\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n }\n else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(`Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n }\n else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n if (shapeProd === 0) {\n throw Error(`Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(`The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\nexport function parseAxisParam(axis, shape) {\n const rank = shape.length;\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n // Check for valid range\n assert(axis.every(ax => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n // Check for only integers\n assert(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape, axis) {\n const newShape = [];\n const keptDims = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return { newShape, keptDims };\n}\nexport function getTypedArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function getArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else if (dtype === 'string') {\n values = new Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function checkConversionForErrors(vals, dtype) {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype) {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType, newType) {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\nexport function isTypedArray(a) {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\nexport function bytesPerElement(dtype) {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n }\n else if (dtype === 'complex64') {\n return 8;\n }\n else if (dtype === 'bool') {\n return 1;\n }\n else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr) {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n/** Returns true if the value is a string. 
*/\nexport function isString(value) {\n return typeof value === 'string' || value instanceof String;\n}\nexport function isBoolean(value) {\n return typeof value === 'boolean';\n}\nexport function isNumber(value) {\n return typeof value === 'number';\n}\nexport function inferDtype(values) {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n }\n else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n }\n else if (isNumber(values)) {\n return 'float32';\n }\n else if (isString(values)) {\n return 'string';\n }\n else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\nexport function isFunction(f) {\n return !!(f && f.constructor && f.call && f.apply);\n}\nexport function nearestDivisor(size, start) {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\nexport function computeStrides(shape) {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\n/**\n * Create typed array for scalar value. Used for storing in `DataStorage`.\n */\nexport function createScalarValue(value, dtype) {\n if (dtype === 'string') {\n return encodeString(value);\n }\n return toTypedArray([value], dtype);\n}\nexport function toTypedArray(a, dtype) {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = flatten(a);\n }\n if (env().getBool('DEBUG')) {\n checkConversionForErrors(a, dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a);\n }\n else if (dtype === 'int32') {\n return new Int32Array(a);\n }\n else if (dtype === 'bool') {\n const bool = new Uint8Array(a.length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round(a[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\nfunction createNestedArray(offset, shape, a) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0];\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n }\n else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a);\n }\n }\n return ret;\n}\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(shape, a) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}.`);\n }\n return createNestedArray(0, shape, a);\n}\nfunction noConversionNeeded(a, dtype) {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\nexport function makeOnesTypedArray(size, dtype) {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < 
array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\nexport function makeZerosTypedArray(size, dtype) {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size);\n }\n else if (dtype === 'int32') {\n return new Int32Array(size);\n }\n else if (dtype === 'bool') {\n return new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(shape, dtype) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n }\n else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n }\n else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now() {\n return env().platform.now();\n}\nexport function assertNonNegativeIntegerDimensions(shape) {\n shape.forEach(dimSize => {\n assert(Number.isInteger(dimSize) && dimSize >= 0, () => `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(path, requestInits) {\n return env().platform.fetch(path, requestInits);\n}\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. 
Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(locs, rank, strides) {\n if (rank === 0) {\n return 0;\n }\n else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(index, rank, strides) {\n if (rank === 0) {\n return [];\n }\n else if (rank === 1) {\n return [index];\n }\n const locs = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n//# sourceMappingURL=util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\nexport class Profiler {\n constructor(backendTimer, logger) {\n this.backendTimer = backendTimer;\n this.logger = logger;\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n profileKernel(kernelName, inputs, f) {\n let outputs;\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n const timer = this.backendTimer.time(holdResultWrapperFn);\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n logKernelProfile(kernelProfile) {\n const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile;\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);\n });\n });\n }\n}\nexport function checkComputationForErrors(vals, dtype, kernelName) {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n 
const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\nexport class Logger {\n logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n console.log(`%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${inputShapesDescription}\\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');\n }\n}\n//# sourceMappingURL=profiler.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(tape, xs, y) {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX = {};\n const nodesFromX = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n if (anyInputFromX) {\n break;\n }\n }\n }\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY = {};\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n 
tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n // Return the paths that come from x and lead to y.\n const filteredTape = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n prunedNode.outputs = node.outputs;\n filteredTape.push(prunedNode);\n }\n }\n return filteredTape;\n}\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n const dys = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n }\n else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n if (node.gradient == null) {\n throw new Error(`Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(`Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n }\n else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n//# sourceMappingURL=tape.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { computeStrides, isString, rightPad, sizeFromShape } from './util';\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\nexport function tensorToString(vals, shape, dtype, verbose) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\nfunction computeMaxSizePerColumn(vals, shape, dtype, strides) {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\nfunction valToString(val, pad, dtype) {\n let valStr;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n }\n else if (isString(val)) {\n valStr = `'${val}'`;\n }\n else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n }\n else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n return rightPad(valStr, pad);\n}\nfunction boolNumToString(v) {\n return v === 0 ? 'false' : 'true';\n}\nfunction subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0])];\n }\n return [vals[0].toString()];\n }\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n let firstVals = Array.from(vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals = dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\nfunction createComplexTuples(vals) {\n const complexTuples = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]]);\n }\n return complexTuples;\n}\n//# sourceMappingURL=tensor_format.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { tensorToString } from './tensor_format';\nimport * as util from './util';\nimport { computeStrides, toNestedArray } from './util';\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n constructor(shape, dtype, values) {\n this.dtype = dtype;\n this.shape = shape.slice();\n this.size = util.sizeFromShape(shape);\n if (values != null) {\n const n = values.length;\n util.assert(n === this.size, () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value, ...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n const index = this.locToIndex(locs);\n this.values[index] = value;\n }\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index];\n }\n locToIndex(locs) {\n if (this.rank === 0) {\n return 0;\n }\n else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n indexToLoc(index) {\n if (this.rank === 0) {\n return [];\n }\n else if (this.rank === 1) {\n return [index];\n }\n const locs = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor() {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype);\n }\n}\n// For tracking tensor creation and disposal.\nlet trackerFn = null;\n// Used by chaining methods to call into ops.\nlet opHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn) {\n trackerFn = fn;\n}\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler) {\n opHandler = handler;\n}\n/**\n * Sets the deprecation warning function to be used by this file. This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn) {\n deprecationWarningFn = fn;\n}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n constructor(shape, dtype, dataId, id) {\n /** Whether this tensor has been globally kept. */\n this.kept = false;\n this.isDisposedInternal = false;\n this.shape = shape.slice();\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer() {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype, vals);\n }\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync() {\n return opHandler.buffer(this.shape, this.dtype, this.dataSync());\n }\n /**\n * Returns the tensor data as a nested array. 
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array() {\n const vals = await this.data();\n return toNestedArray(this.shape, vals);\n }\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync() {\n return toNestedArray(this.shape, this.dataSync());\n }\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data() {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data;\n try {\n return bytes.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync() {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return data.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /** Returns the underlying bytes of the tensor's data. */\n async bytes() {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data;\n }\n else {\n return new Uint8Array(data.buffer);\n }\n }\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose() {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n get isDisposed() {\n return this.isDisposedInternal;\n }\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false) {\n return opHandler.print(this, verbose);\n }\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone() {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n /**\n * Returns a human-readable description of the tensor. 
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false) {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n cast(dtype) {\n this.throwIfDisposed();\n return opHandler.cast(this, dtype);\n }\n variable(trainable = true, name, dtype) {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype);\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n constructor(initialValue, trainable, name, tensorId) {\n super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.trainable = trainable;\n this.name = name;\n }\n /**\n * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue) {\n if (newValue.dtype !== this.dtype) {\n throw new Error(`dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(`shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n dispose() {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport var Rank;\n(function (Rank) {\n Rank[\"R0\"] = \"R0\";\n Rank[\"R1\"] = \"R1\";\n Rank[\"R2\"] = \"R2\";\n Rank[\"R3\"] = \"R3\";\n Rank[\"R4\"] = \"R4\";\n Rank[\"R5\"] = \"R5\";\n Rank[\"R6\"] = \"R6\";\n})(Rank || (Rank = {}));\n// Looks for upcasting types. 
Used, for example, in operations with mixed dtype\n// inputs.\nvar UpcastInt32AndMap;\n(function (UpcastInt32AndMap) {\n UpcastInt32AndMap[\"float32\"] = \"float32\";\n UpcastInt32AndMap[\"int32\"] = \"int32\";\n UpcastInt32AndMap[\"bool\"] = \"int32\";\n UpcastInt32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastInt32AndMap || (UpcastInt32AndMap = {}));\nvar UpcastBoolAndMap;\n(function (UpcastBoolAndMap) {\n UpcastBoolAndMap[\"float32\"] = \"float32\";\n UpcastBoolAndMap[\"int32\"] = \"int32\";\n UpcastBoolAndMap[\"bool\"] = \"bool\";\n UpcastBoolAndMap[\"complex64\"] = \"complex64\";\n})(UpcastBoolAndMap || (UpcastBoolAndMap = {}));\nvar UpcastFloat32AndMap;\n(function (UpcastFloat32AndMap) {\n UpcastFloat32AndMap[\"float32\"] = \"float32\";\n UpcastFloat32AndMap[\"int32\"] = \"float32\";\n UpcastFloat32AndMap[\"bool\"] = \"float32\";\n UpcastFloat32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));\nvar UpcastComplex64AndMap;\n(function (UpcastComplex64AndMap) {\n UpcastComplex64AndMap[\"float32\"] = \"complex64\";\n UpcastComplex64AndMap[\"int32\"] = \"complex64\";\n UpcastComplex64AndMap[\"bool\"] = \"complex64\";\n UpcastComplex64AndMap[\"complex64\"] = \"complex64\";\n})(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\nexport function upcastType(typeA, typeB) {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n/** Returns the output type after summation. */\nexport function sumOutType(type) {\n return upcastType(type, 'int32');\n}\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { Tensor } from './tensor';\nimport { upcastType } from './types';\nimport { assert } from './util';\nexport function makeTypesMatch(a, b) {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\nexport function assertTypesMatch(a, b) {\n assert(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\nexport function isTensorInList(tensor, tensorList) {\n return tensorList.some(x => x.id === tensor.id);\n}\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. 
If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result) {\n const list = [];\n const seen = new Set();\n walkTensorContainer(result, list, seen);\n return list;\n}\nfunction walkTensorContainer(container, list, seen) {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n// tslint:disable-next-line:no-any\nfunction isIterable(obj) {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n//# sourceMappingURL=tensor_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { KernelBackend } from './backends/backend';\nimport { Environment, setEnvironmentGlobal } from './environment';\nimport { getGlobalNamespace } from './global_util';\nimport { Add, Cast } from './kernel_names';\nimport { getGradient, getKernel, getKernelsForBackend } from './kernel_registry';\nimport { Profiler } from './profiler';\nimport { backpropagateGradients, getFilteredNodesXToY } from './tape';\nimport { setTensorTracker, Tensor, Variable } from './tensor';\nimport { getTensorsInContainer } from './tensor_util';\nimport * as util from './util';\nimport { bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape } from './util';\nclass EngineState {\n constructor() {\n // Public since optimizers will use it.\n this.registeredVariables = {};\n this.nextTapeNodeId = 0;\n this.numBytes = 0;\n this.numTensors = 0;\n this.numStringTensors = 0;\n this.numDataBuffers = 0;\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n this.gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n this.kernelDepth = 0;\n this.scopeStack = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. 
We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n this.numDataMovesStack = [];\n this.nextScopeId = 0;\n this.tensorInfo = new WeakMap();\n this.profiling = false;\n this.activeProfile = { newBytes: 0, newTensors: 0, peakBytes: 0, kernels: [], result: null };\n }\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\nexport class Engine {\n constructor(ENV) {\n this.ENV = ENV;\n this.registry = {};\n this.registryFactory = {};\n this.pendingBackendInitId = 0;\n this.state = new EngineState();\n }\n async ready() {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => { });\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n get backend() {\n if (this.pendingBackendInit != null) {\n throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const { name, asyncInit } = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(`The highest priority backend '${name}' has not yet been ` +\n `initialized. Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n backendNames() {\n return Object.keys(this.registryFactory);\n }\n findBackend(backendName) {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const { asyncInit } = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n }\n else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n findBackendFactory(backendName) {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n registerBackend(backendName, factory, priority = 1) {\n if (backendName in this.registryFactory) {\n console.warn(`${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = { factory, priority };\n return true;\n }\n async setBackend(backendName) {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const { success, asyncInit } = this.initializeBackend(backendName);\n const result = asyncInit ? 
await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n return true;\n }\n setupRegisteredKernels() {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n disposeRegisteredKernels(backendName) {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n initializeBackend(backendName) {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(`Cannot initialize backend ${backendName}, no registration found.`);\n }\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend)\n && typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success = backend\n .then(backendInstance => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return { success, asyncInit: true };\n }\n else {\n this.registry[backendName] = backend;\n return { success: true, asyncInit: false };\n }\n }\n catch (err) {\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return { success: false, asyncInit: false };\n }\n }\n removeBackend(backendName) {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. 
Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n delete this.registryFactory[backendName];\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n getSortedBackends() {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a, b) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n initializeBackendsAndReturnBest() {\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const { success, asyncInit } = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return { name: backendName, asyncInit };\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n moveData(backend, dataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n tidy(nameOrFn, fn) {\n let name = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n }\n else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error('When calling with two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error('When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result;\n return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n scopedRun(start, end, f) {\n start();\n try {\n const res = f();\n end();\n return res;\n }\n catch (ex) {\n end();\n throw ex;\n }\n }\n nextTensorId() {\n return Engine.nextTensorId++;\n }\n nextVariableId() {\n return Engine.nextVariableId++;\n }\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. 
It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n *\n * This method will go away once all kernels are modularized since we won't\n * need to turn off the tape inside runKernel().\n */\n clone(x) {\n const y = this.makeTensorFromDataId(x.dataId, x.shape, x.dtype);\n const inputs = { x };\n const grad = (dy) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = { x: dy };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast(dy, dtype), gradInputs, null /* grad */, Cast, attrs);\n }\n });\n const saved = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(kernelName, inputs, attrs, inputsToSave, outputsToSave) {\n const forwardFunc = null;\n const backwardsFunc = null;\n // Call runKernel as a stop-gap until we modularize all kernels.\n // Once we modularize all kernels, we will remove the existing\n // `runKernelFunc`.\n return this.runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave);\n }\n shouldCheckForMemLeaks() {\n return this.ENV.getBool('IS_TEST');\n }\n checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) {\n const numDataIdsAfter = this.backend.numDataIds();\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(`Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n /**\n * @deprecated Use `runKernel` for newly added kernels. Keep using this method\n * only for kernels that are not yet fully modularized.\n */\n runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave) {\n let outputs;\n let saved = [];\n const isTapeOn = this.isTapeOn();\n if (kernelName == null) {\n kernelName =\n this.state.activeScope != null ? 
this.state.activeScope.name : '';\n }\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n let kernelFunc;\n const kernel = getKernel(kernelName, this.backendName);\n let out;\n if (kernel != null) {\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({ inputs, attrs, backend: this.backend });\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n const outTensors = outInfos.map(({ dataId, shape, dtype }) => this.makeTensorFromDataId(dataId, shape, dtype));\n // Save the inputs and outputs.\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (isTapeOn) {\n let tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors);\n if (tensorsToSave == null) {\n // Fallback for ops that call runKernelFunc and pass in\n // inputsToSave and outputsToSave. Currently this is the set of ops\n // with kernel support in the WASM backend. Once those ops and\n // respective gradients are modularised we can remove this path.\n if (outputsToSave == null) {\n outputsToSave = [];\n }\n const outsToSave = outTensors.filter((_, i) => outputsToSave[i]);\n tensorsToSave = (inputsToSave || []).slice().concat(outsToSave);\n }\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n }\n else {\n const saveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]);\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n // Stop recording to a tape when running a kernel.\n let kernelProfile;\n this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n }\n else {\n kernelProfile = this.profiler.profileKernel(kernelName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n if (isTapeOn) {\n this.addTapeNode(kernelName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? 
outputs : outputs[0]);\n }\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n saveTensorsForBackwardMode(tensors) {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * Returns undefined if their is no registered gradient for this kernel in the\n * gradient registry.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n getTensorsForGradient(kernelName, inputs, outputs) {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave = gradConfig.inputsToSave || [];\n const outputsToSave = gradConfig.outputsToSave || [];\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave;\n if (gradConfig.saveAllInputs) {\n util.assert(Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.');\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n }\n else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]);\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // TODO(yassogba) throw exception here once all runkernelFunc calls with\n // inputsToSave/outputsToSave are removed\n return null;\n }\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(values, shape, dtype, backend) {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = values.map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. 
It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(dataId, shape, dtype, backend) {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n return t;\n }\n makeVariable(initialValue, trainable = true, name, dtype) {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n incRef(a, backend) {\n const refCount = this.state.tensorInfo.has(a.dataId) ?\n this.state.tensorInfo.get(a.dataId).refCount :\n 0;\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n if (refCount === 0) {\n this.state.numDataBuffers++;\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes,\n refCount: 0\n });\n this.state.numBytes += bytes;\n }\n this.state.tensorInfo.get(a.dataId).refCount++;\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n disposeTensor(a) {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n const refCount = info.refCount;\n if (refCount <= 1) {\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64') {\n this.state.numBytes -= info.bytes;\n }\n this.state.numDataBuffers--;\n info.backend.disposeData(a.dataId);\n this.state.tensorInfo.delete(a.dataId);\n }\n else {\n this.state.tensorInfo.get(a.dataId).refCount--;\n }\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. 
Creating a stack trace is too expensive\n // to do unconditionally.\n }\n disposeVariables() {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n disposeVariable(v) {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n memory() {\n const info = this.backend.memory();\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push('Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n async profile(query) {\n this.state.profiling = true;\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n this.state.profiling = false;\n this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n isTapeOn() {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) {\n const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved };\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n keep(result) {\n result.kept = true;\n return result;\n }\n startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n endTape() {\n this.state.gradientDepth--;\n }\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name) {\n const scopeInfo = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n /**\n * End a scope. 
Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id));\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(f, xs, dy, allowNoGradients = false) {\n util.assert(xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f));\n util.assert(y instanceof Tensor, () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n return this.tidy('backward', () => {\n const accumulatedGradientMap = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(accumulatedGradientMap, filteredTape, \n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f), \n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return { value: y, grads };\n });\n }\n customGrad(f) {\n util.assert(util.isFunction(f), () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs) => {\n util.assert(inputs.every(t => t instanceof Tensor), () => 'The args passed in customGrad(f)(x1, x2,...) 
must all be ' +\n 'tensors');\n let res;\n const inputMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n return this.runKernelFunc((_, save) => {\n res = f(...[...inputs, save]);\n util.assert(res.value instanceof Tensor, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(util.isFunction(res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n }, inputMap, (dy, saved) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads = Array.isArray(gradRes) ? gradRes : [gradRes];\n util.assert(grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(grads.every(t => t instanceof Tensor), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n });\n };\n }\n readSync(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n async time(query) {\n const start = now();\n const timingInfo = await this.backend.time(query);\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n track(result) {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n return result;\n }\n get registeredVariables() {\n return this.state.registeredVariables;\n }\n /**\n * Resets the engine state. 
Removes all backends but does not remove\n * registered backend factories.\n */\n reset() {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\nEngine.nextTensorId = 0;\nEngine.nextVariableId = 0;\nfunction ones(shape) {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\nexport function getOrMakeEngine() {\n const ns = getGlobalNamespace();\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\nexport const ENGINE = getOrMakeEngine();\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a, b) {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = { a, b };\n return ENGINE.runKernelFunc((backend, save) => {\n const res = backend.add(a, b);\n save([a, b]);\n return res;\n }, inputs, null /* gradient */, Add);\n}\n//# sourceMappingURL=engine.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined() {\n return typeof navigator !== 'undefined' && navigator != null;\n}\nexport function isMobile() {\n if (_isNavigatorDefined()) {\n // tslint:disable-next-line:no-any\n const a = navigator.userAgent || navigator.vendor || window.opera;\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s 
)|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\nexport function isBrowser() {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n//# sourceMappingURL=device_util.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\nimport * as device_util from './device_util';\nimport { env } from './environment';\nconst ENV = env();\n/**\n * This file contains environment-related flag registrations.\n */\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn('Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_NODE', () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n/** Whether this browser is Chrome. 
*/\nENV.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n//# sourceMappingURL=flags.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from './engine';\nimport { env } from './environment';\nimport { Tensor } from './tensor';\nimport { assert, flatten, inferDtype, isTypedArray, toTypedArray } from './util';\nexport function inferShape(val, dtype) {\n let firstElem = val;\n if (isTypedArray(val)) {\n return dtype === 'string' ? 
[] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape = [];\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n return shape;\n}\nfunction deepAssertShapeConsistency(val, shape, indices) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\nfunction assertDtype(expectedDtype, actualDType, argName, functionName) {\n if (expectedDtype == null) {\n return;\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(`Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\nexport function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 'null' : x.constructor.name;\n throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype) :\n flatten(x, [], skipTypedArray);\n return ENGINE.makeTensor(values, inferredShape, inferredDtype);\n}\nexport function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') {\n if (!Array.isArray(arg)) {\n throw new Error(`Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg;\n return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName), parseAsDtype);\n}\n//# sourceMappingURL=tensor_util_env.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nexport const OP_SCOPE_SUFFIX = '__op';\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f) {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(`Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n let opName = keys[0];\n const fn = f[opName];\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n // tslint:disable-next-line:no-any\n const f2 = (...args) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n }\n catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', { value: opName, configurable: true });\n // tslint:disable-next-line:no-any\n return f2;\n}\n//# sourceMappingURL=operation.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Complex } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real, imag) {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n const forward = (backend) => {\n return backend.complex($real, $imag);\n };\n const inputs = { real: $real, imag: $imag };\n return ENGINE.runKernelFunc(forward, inputs, null /* gradient */, Complex);\n}\nexport const complex = op({ complex_ });\n//# sourceMappingURL=complex.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray } from '../util';\n/** This is shared code across all tensor creation methods. */\nexport function makeTensor(values, shape, inferredShape, dtype) {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(`Cannot construct a complex64 tensor directly. 
` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values];\n }\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values, [], true);\n return ENGINE.makeTensor(values, shape, dtype);\n}\n//# sourceMappingURL=tensor_ops_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(values, shape, dtype) {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/* Type definitions for exporting and importing of models. */\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { complex } from '../ops/complex';\nimport { tensor } from '../ops/tensor';\nimport { sizeFromShape } from '../util';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(tensors, group) {\n // TODO(adarob, cais): Support quantization.\n const specs = [];\n const dataPromises = [];\n const names = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec = { name, shape: t.shape, dtype: t.dtype };\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async (resolve) => {\n const vals = await t.bytes();\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n }\n else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n const tensorValues = await Promise.all(dataPromises);\n return { data: concatenateTypedArrays(tensorValues), specs };\n}\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(buffer, specs) {\n // TODO(adarob, cais): Support quantization.\n const out = {};\n let float16Decode;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values;\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n }\n else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n }\n else {\n throw new Error(`Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n }\n else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray);\n }\n else {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n }\n else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n }\n else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n values.push(bytes);\n offset += byteLength;\n }\n }\n else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n }\n else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs) {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n let totalByteLength = 0;\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. `normliazedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. 
If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfy this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs = [];\n xs.forEach((x) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :\n new x.constructor(x));\n if (!(x instanceof Float32Array || x instanceof Int32Array ||\n x instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n return y.buffer;\n}\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str) {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer) {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str) {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers) {\n if (buffers.length === 1) {\n return buffers[0];\n }\n let totalByteLength = 0;\n buffers.forEach((buffer) => {\n totalByteLength += buffer.byteLength;\n });\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path) {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = path.split(SEPARATOR);\n return items[items.length - 1];\n}\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function getModelArtifactsInfoForJSON(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof 
ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n/**\n * Computes mantisa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable() {\n const convertMantissa = (i) => {\n let m = i << 13;\n let e = 0;\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n return m | e;\n };\n const mantisaTable = new Uint32Array(2048);\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n return mantisaTable;\n}\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable() {\n const exponentTable = new Uint32Array(64);\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n return exponentTable;\n}\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 6d offset values.\n */\nfunction computeFloat16OffsetTable() {\n const offsetTable = new Uint32Array(64);\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n return offsetTable;\n}\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder() {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n return (quantizedArray) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n//# sourceMappingURL=io_utils.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport class IORouterRegistry {\n constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n static getInstance() {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url) {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url, loadOptions) {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n static getHandlers(url, handlerType, loadOptions) {\n const validHandlers = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\nexport const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions);\n//# sourceMappingURL=router_registry.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase() {\n const idbFactory = getIndexedDBFactory();\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\nfunction getIndexedDBFactory() {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('Failed to obtain IndexedDB factory because the current environment' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow = typeof window === 'undefined' ? 
self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error('The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\nfunction setUpDatabase(openRequest) {\n const db = openRequest.result;\n db.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' });\n db.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' });\n}\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB {\n constructor(modelPath) {\n this.indexedDB = getIndexedDBFactory();\n if (modelPath == null || !modelPath) {\n throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n async save(modelArtifacts) {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n return this.databaseAction(this.modelPath, modelArtifacts);\n }\n async load() {\n return this.databaseAction(this.modelPath);\n }\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n databaseAction(modelPath, modelArtifacts) {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n }\n else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n }\n else {\n // Put model into object store.\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo });\n let modelTx;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => 
resolve({ modelArtifactsInfo });\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\nBrowserIndexedDB.URL_SCHEME = 'indexeddb://';\nexport const indexedDBRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel'));\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath) {\n return new BrowserIndexedDB(modelPath);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\nexport class BrowserIndexedDBManager {\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n async listModels() {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = store.getAll();\n getAllInfoRequest.onsuccess = () => {\n const out = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n 
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const getInfoRequest = infoStore.get(path);\n let modelTx;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n }\n else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error => reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n//# sourceMappingURL=indexed_db.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts() {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error('purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if 
(key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\nfunction getModelKeys(path) {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage {\n constructor(modelPath) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n if (modelPath == null || !modelPath) {\n throw new Error('For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n try {\n this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(modelArtifacts.weightData));\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify({\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata\n }));\n return { modelArtifactsInfo };\n }\n catch (err) {\n // If saving failed, clean up all items saved so far.\n this.LS.removeItem(this.keys.info);\n this.LS.removeItem(this.keys.topology);\n this.LS.removeItem(this.keys.weightSpecs);\n this.LS.removeItem(this.keys.weightData);\n 
this.LS.removeItem(this.keys.modelMetadata);\n throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load() {\n const info = JSON.parse(this.LS.getItem(this.keys.info));\n if (info == null) {\n throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);\n }\n if (info.modelTopologyType !== 'JSON') {\n throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n const out = {};\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(`In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString);\n out.format = metadata['format'];\n out.generatedBy = metadata['generatedBy'];\n out.convertedBy = metadata['convertedBy'];\n out.userDefinedMetadata = metadata['userDefinedMetadata'];\n }\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(`In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n return out;\n }\n}\nBrowserLocalStorage.URL_SCHEME = 'localstorage://';\nexport const localStorageRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. 
For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath) {\n return new BrowserLocalStorage(modelPath);\n}\nexport class BrowserLocalStorageManager {\n constructor() {\n assert(env().getBool('IS_BROWSER'), () => 'Current environment is not a web browser');\n assert(typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n async listModels() {\n const out = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key));\n }\n }\n return out;\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info));\n this.LS.removeItem(keys.info);\n this.LS.removeItem(keys.topology);\n this.LS.removeItem(keys.weightSpecs);\n this.LS.removeItem(keys.weightData);\n return info;\n }\n}\n//# sourceMappingURL=local_storage.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\nimport { assert } from '../util';\nimport { IORouterRegistry } from './router_registry';\nconst URL_SCHEME_SUFFIX = '://';\nexport class ModelStoreManagerRegistry {\n constructor() {\n this.managers = {};\n }\n static getInstance() {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme, manager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`);\n registry.managers[scheme] = manager;\n }\n static getManager(scheme) {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n static getSchemes() {\n return Object.keys(this.getInstance().managers);\n }\n}\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url) {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(`The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\nasync function cloneModelInternal(sourceURL, destURL, deleteSource = false) {\n assert(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`);\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`);\n assert(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(saveHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(sourceURL).scheme;\n const modelArtifacts = await loadHandler.load();\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n const saveResult = await saveHandler.save(modelArtifacts);\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n return saveResult.modelArtifactsInfo;\n}\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels() {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out = {};\n for (const scheme of schemes) {\n const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n/**\n * Remove a model specified by URL from a reigstered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url) {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(sourceURL, destURL) {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(sourceURL, destURL) {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\nexport { moveModel, copyModel, removeModel, listModels };\n//# sourceMappingURL=model_management.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { BrowserIndexedDB, BrowserIndexedDBManager } from '../io/indexed_db';\nimport { BrowserLocalStorage, BrowserLocalStorageManager } from '../io/local_storage';\nimport { ModelStoreManagerRegistry } from '../io/model_management';\nexport class PlatformBrowser {\n fetch(path, init) {\n return fetch(path, init);\n }\n now() {\n return performance.now();\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n }\n catch (err) {\n }\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n }\n catch (err) {\n }\n}\n//# sourceMappingURL=platform_browser.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\nlet systemFetch;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch() {\n return systemFetch;\n}\nexport class PlatformNode {\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n fetch(path, requestInits) {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n now() {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n//# sourceMappingURL=platform_node.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorBuffer } from '../tensor';\nimport * as util from '../util';\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. 
Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(shape, dtype = 'float32', values) {\n dtype = dtype || 'float32';\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n//# sourceMappingURL=buffer.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Cast } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x, dtype) {\n const $x = convertToTensor(x, 'x', 'cast');\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n const inputs = { x: $x };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast($x, dtype), inputs, null /* grad */, Cast, attrs);\n}\nexport const cast = op({ cast_ });\n//# sourceMappingURL=cast.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Identity } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x) {\n const $x = convertToTensor(x, 'x', 'clone', null);\n const forward = () => ENGINE.makeTensorFromDataId($x.dataId, $x.shape, $x.dtype);\n const inputs = { x: $x };\n // Note this op is called tf.identity in python. Hence the kernel name used\n // here.\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Identity);\n}\nexport const clone = op({ clone_ });\n//# sourceMappingURL=clone.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x, verbose = false) {\n console.log(x.toString(verbose));\n}\n//# sourceMappingURL=print.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Required side effectful code for tfjs-core\n// Set up Engine and ENV\nimport { getOrMakeEngine } from './engine';\ngetOrMakeEngine();\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n// Set up OpHandler\nimport { buffer } from './ops/buffer';\nimport { cast } from './ops/cast';\nimport { clone } from './ops/clone';\nimport { print } from './ops/print';\nimport { setOpHandler } from './tensor';\nconst opHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n//# sourceMappingURL=base_side_effects.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { basename, concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\nfunction defer(f) {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\nexport class BrowserDownloads {\n constructor(fileNamePrefix) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error('browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n async save(modelArtifacts) {\n if (typeof (document) === 'undefined') {\n throw new Error('Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const 
weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const weightsManifest = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n weightsManifest\n };\n const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: 'application/json' }));\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :\n this.jsonAnchor;\n jsonAnchor.download = this.modelTopologyFileName;\n jsonAnchor.href = modelTopologyAndWeightManifestURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) };\n }\n }\n}\nBrowserDownloads.URL_SCHEME = 'downloads://';\nclass BrowserFiles {\n constructor(files) {\n if (files == null || files.length < 1) {\n throw new Error(`When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.files = files;\n }\n async load() {\n const jsonFile = this.files[0];\n const weightFiles = this.files.slice(1);\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse(event.target.result);\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));\n return;\n }\n if (weightFiles.length === 0) {\n resolve({ modelTopology });\n }\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${jsonFile.name}`));\n return;\n }\n let pathToFile;\n try {\n pathToFile =\n this.checkManifestAndWeightFiles(weightsManifest, weightFiles);\n }\n catch (err) {\n reject(err);\n return;\n }\n const weightSpecs = [];\n const paths = [];\n const perFileBuffers = [];\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n paths.push(path);\n perFileBuffers.push(null);\n });\n weightSpecs.push(...weightsGroup.weights);\n });\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const weightData = event.target.result;\n const index = paths.indexOf(path);\n 
perFileBuffers[index] = weightData;\n if (perFileBuffers.indexOf(null) === -1) {\n resolve({\n modelTopology,\n weightSpecs,\n weightData: concatenateArrayBuffers(perFileBuffers),\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy,\n userDefinedMetadata: modelJSON.userDefinedMetadata\n });\n }\n };\n weightFileReader.onerror = error => reject(`Failed to weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(pathToFile[path]);\n });\n });\n };\n jsonReader.onerror = error => reject(`Failed to read model topology and weights manifest JSON ` +\n `from file '${jsonFile.name}'. BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(jsonFile);\n });\n }\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n checkManifestAndWeightFiles(manifest, files) {\n const basenames = [];\n const fileNames = files.map(file => basename(file.name));\n const pathToFile = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(`Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);\n }\n else {\n pathToFile[path] = files[fileNames.indexOf(pathBasename)];\n }\n });\n }\n if (basenames.length !== files.length) {\n throw new Error(`Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${files.length}).`);\n }\n return pathToFile;\n }\n}\nexport const browserDownloadsRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used as model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. `null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. 
A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model') {\n return new BrowserDownloads(fileNamePrefix);\n}\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (``)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, One or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files) {\n return new BrowserFiles(files);\n}\n//# sourceMappingURL=browser_files.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { assert } from '../util';\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolved.\n * @param startFraction Optional fraction start. Default to 0.\n * @param endFraction Optional fraction end. 
Default to 1.\n */\nexport function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n const registerMonitor = (promise) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n function checkPromises(promises) {\n assert(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a none empty array');\n }\n function checkFraction(startFraction, endFraction) {\n assert(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n return Promise.all(promises.map(registerMonitor));\n}\n//# sourceMappingURL=progress.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\nimport * as util from '../util';\nimport { decodeWeights } from './io_utils';\nimport { monitorPromisesProgress } from './progress';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n const fetchFunc = loadOptions.fetchFunc == null ? 
env().platform.fetch :\n loadOptions.fetchFunc;\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }));\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction);\n const bufferPromises = responses.map(response => response.arrayBuffer());\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction);\n return buffers;\n}\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });\n const loadWeights = weightsLoaderFactory(fetchWeights);\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(fetchWeightsFunction) {\n return async (manifest, filePathPrefix = '', weightNames) => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch = {};\n const weightsFound = weightNames != null ? 
weightNames.map(() => false) : [];\n const allManifestWeightNames = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n }\n else {\n enqueueWeightsForFetchingFn();\n }\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(`Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. \\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n const fetchUrls = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n const weightsTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n bufferIndexOffset += numBuffers;\n });\n return weightsTensorMap;\n };\n}\n//# sourceMappingURL=weights_loader.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nimport { loadWeightsAsArrayBuffer } from './weights_loader';\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest {\n constructor(path, loadOptions) {\n this.DEFAULT_METHOD = 'POST';\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n if (loadOptions.fetchFunc != null) {\n assert(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n }\n else {\n this.fetch = env().platform.fetch;\n }\n assert(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n if (Array.isArray(path)) {\n assert(path.length === 2, () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error('requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);\n init.body = new FormData();\n const weightsManifest = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata,\n weightsManifest\n };\n init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');\n if (modelArtifacts.weightData != null) {\n init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');\n }\n const response = await this.fetch(this.path, init);\n if (response.ok) {\n return {\n modelArtifactsInfo: 
getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n }\n else {\n throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load() {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n if (!modelConfigRequest.ok) {\n throw new Error(`Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelConfig;\n try {\n modelConfig = await modelConfigRequest.json();\n }\n catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models have been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your.pb models with the \\'pb2json\\'' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n }\n else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n const modelTopology = modelConfig.modelTopology;\n const weightsManifest = modelConfig.weightsManifest;\n const generatedBy = modelConfig.generatedBy;\n const convertedBy = modelConfig.convertedBy;\n const format = modelConfig.format;\n const userDefinedMetadata = modelConfig.userDefinedMetadata;\n // We do not allow both modelTopology and weightsManifest to be missing.\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` +\n `topology or manifest for weights.`);\n }\n let weightSpecs;\n let weightData;\n if (weightsManifest != null) {\n const results = await this.loadWeights(weightsManifest);\n [weightSpecs, weightData] = results;\n }\n const artifacts = {\n modelTopology,\n weightSpecs,\n weightData,\n userDefinedMetadata,\n generatedBy,\n convertedBy,\n format\n };\n const initializer = modelConfig.modelInitializer;\n if (initializer) {\n artifacts.modelInitializer = initializer;\n }\n return artifacts;\n }\n async loadWeights(weightsManifest) {\n const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n const weightSpecs = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n const fetchURLs = [];\n const urlPromises = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n }\n else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\nHTTPRequest.URL_SCHEME_REGEX = /^https?:\\/\\//;\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url) {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\nexport function isHTTPScheme(url) {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\nexport const httpRouter = (url, loadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`, if one wants to use it in\n // an environment that is not the browser or node they have to setup a\n // global fetch polyfill.\n return null;\n }\n else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n }\n else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n};\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n/**\n * Creates an IOHandler subtype that sends model artifacts to HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the requst,\n * this particular server reconsistutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload)') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending\n * HTTP request to server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path, loadOptions) {\n return new HTTPRequest(path, loadOptions);\n}\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(path, loadOptions) {\n return http(path, loadOptions);\n}\n//# sourceMappingURL=http.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
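For the load path of the HTTPRequest handler above, a minimal sketch (the model URL is a placeholder; the options shown are ones the constructor reads, `requestInit` and `onProgress`):

```js
import * as tf from '@tensorflow/tfjs';

async function loadOverHttp() {
  // tf.io.http wires requestInit/onProgress into the HTTPRequest handler
  // defined above; the URL here is only illustrative.
  const handler = tf.io.http('https://example.com/model.json', {
    requestInit: { credentials: 'include' },
    onProgress: (fraction) => console.log(`weights: ${(fraction * 100).toFixed(0)}%`),
  });
  return tf.loadLayersModel(handler);
}
```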
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nclass PassthroughLoader {\n constructor(modelArtifacts) {\n this.modelArtifacts = modelArtifacts;\n }\n async load() {\n return this.modelArtifacts;\n }\n}\nclass PassthroughSaver {\n constructor(saveHandler) {\n this.saveHandler = saveHandler;\n }\n async save(modelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {\n if (arguments.length === 1) {\n const isModelArtifacts = modelArtifacts.modelTopology != null ||\n modelArtifacts.weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts);\n }\n else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({ modelTopology: modelArtifacts });\n }\n }\n else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts,\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... 
do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(saveHandler) {\n return new PassthroughSaver(saveHandler);\n}\n//# sourceMappingURL=passthrough.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Importing local_storage and indexed_db is necessary for the routers to be\n// registered.\nimport './indexed_db';\nimport './local_storage';\nimport { browserFiles } from './browser_files';\nimport { browserHTTPRequest, http, isHTTPScheme } from './http';\nimport { concatenateArrayBuffers, decodeWeights, encodeWeights, getModelArtifactsInfoForJSON } from './io_utils';\nimport { fromMemory, withSaveHandler } from './passthrough';\nimport { getLoadHandlers, getSaveHandlers, registerLoadRouter, registerSaveRouter } from './router_registry';\nimport { loadWeights, weightsLoaderFactory } from './weights_loader';\nexport { copyModel, listModels, moveModel, removeModel } from './model_management';\nexport { browserFiles, browserHTTPRequest, concatenateArrayBuffers, decodeWeights, encodeWeights, fromMemory, getLoadHandlers, getModelArtifactsInfoForJSON, getSaveHandlers, http, isHTTPScheme, loadWeights, registerLoadRouter, registerSaveRouter, weightsLoaderFactory, withSaveHandler };\n//# sourceMappingURL=io.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Reshape } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Reshapes a `tf.Tensor` to a given shape.\n *\n * Given an input tensor, returns a new tensor with the same values as the\n * input tensor with shape `shape`.\n *\n * If one component of shape is the special value -1, the size of that\n * dimension is computed so that the total size remains constant. In\n * particular, a shape of [-1] flattens into 1-D. 
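`withSaveHandler` and `fromMemory` from the passthrough module above compose into an in-memory round trip; a sketch assuming `model` is a `tf.LayersModel` instance:

```js
import * as tf from '@tensorflow/tfjs';

async function roundTrip(model) {
  let captured;
  // Capture the artifacts instead of persisting them anywhere.
  await model.save(tf.io.withSaveHandler(async (artifacts) => {
    captured = artifacts;
    return { modelArtifactsInfo: tf.io.getModelArtifactsInfoForJSON(artifacts) };
  }));
  // Rebuild an equivalent model straight from the captured artifacts.
  return tf.loadLayersModel(tf.io.fromMemory(captured));
}
```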
At most one component of\n * shape can be -1.\n *\n * If shape is 1-D or higher, then the operation returns a tensor with shape\n * shape filled with the values of tensor. In this case, the number of\n * elements implied by shape must be the same as the number of elements in\n * tensor.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.reshape([2, 2]).print();\n * ```\n *\n * @param x The input tensor to be reshaped.\n * @param shape An array of integers defining the output tensor shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction reshape_(x, shape) {\n const $x = convertToTensor(x, 'x', 'reshape', null);\n const inputs = { x: $x };\n const attrs = { shape };\n const forward = (backend, save) => {\n shape = util.inferFromImplicitShape(shape, $x.size);\n util.assert($x.size === util.sizeFromShape(shape), () => 'new shape and old shape must have the same number of elements.');\n save([$x]);\n return backend.reshape($x, shape);\n };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Reshape, attrs);\n}\nexport const reshape = op({ reshape_ });\n//# sourceMappingURL=reshape.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { BatchMatMul } from '../kernel_names';\nimport { makeTypesMatch } from '../tensor_util';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(a, b, transposeA = false, transposeB = false) {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n util.assert($a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank, () => `Error in matMul: inputs must have the same rank of at least 2, ` +\n `got ranks ${$a.rank} and ${$b.rank}.`);\n const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];\n const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];\n const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];\n const outerShapeB = transposeB ? 
$b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];\n const outerDimsA = $a.shape.slice(0, -2);\n const outerDimsB = $b.shape.slice(0, -2);\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n util.assert(util.arraysEqual(outerDimsA, outerDimsB), () => `Error in matMul: outer dimensions (${outerDimsA}) and (` +\n `${outerDimsB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} must match.`);\n util.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]);\n const a3D = transposeA ? reshape($a, [batchDimA, innerShapeA, outerShapeA]) :\n reshape($a, [batchDimA, outerShapeA, innerShapeA]);\n const b3D = transposeB ? reshape($b, [batchDimB, outerShapeB, innerShapeB]) :\n reshape($b, [batchDimB, innerShapeB, outerShapeB]);\n const forward = (backend, save) => {\n save([a3D, b3D]);\n return backend.batchMatMul(a3D, b3D, transposeA, transposeB);\n };\n const inputs = { a: a3D, b: b3D };\n const attrs = { transposeA, transposeB };\n const res = ENGINE.runKernelFunc(forward, inputs, null /* grad */, BatchMatMul, attrs);\n return reshape(res, outShape);\n}\nexport const matMul = op({ matMul_ });\n//# sourceMappingURL=mat_mul.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { OneHot } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). 
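`matMul_` above reshapes both operands to rank 3 and batches over the leading dimensions; a small shape check illustrating that behaviour:

```js
import * as tf from '@tensorflow/tfjs';

// Two stacks of matrices with matching batch and inner dimensions:
// [2, 3, 4] x [2, 4, 5] multiplies batch-wise into [2, 3, 5].
const a = tf.ones([2, 3, 4]);
const b = tf.ones([2, 4, 5]);
const c = tf.matMul(a, b);
console.log(c.shape); // [2, 3, 5]
```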
If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(indices, depth, onValue = 1, offValue = 0) {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n const outShape = [...$indices.shape, depth];\n const forward = (backend, save) => {\n save([$indices]);\n return reshape(backend.oneHot(reshape($indices, [$indices.size]), depth, onValue, offValue), outShape);\n };\n const inputs = { indices: $indices };\n const attrs = { depth, onValue, offValue };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, OneHot, attrs);\n}\nexport const oneHot = op({ oneHot_ });\n//# sourceMappingURL=one_hot.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Transpose } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. 
Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x, perm) {\n const $x = convertToTensor(x, 'x', 'transpose');\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n if ($x.rank <= 1) {\n return $x.clone();\n }\n const inputs = { x: $x };\n const attrs = { perm };\n return ENGINE.runKernelFunc(backend => backend.transpose($x, perm), inputs, null /* gradient */, Transpose, attrs);\n}\nexport const transpose = op({ transpose_ });\n//# sourceMappingURL=transpose.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { cast } from './cast';\nimport { matMul } from './mat_mul';\nimport { oneHot } from './one_hot';\nimport { op } from './operation';\nimport { transpose } from './transpose';\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. 
The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(labels, predictions, numClasses) {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions = convertToTensor(predictions, 'predictions', 'confusionMatrix');\n util.assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses);\n const oneHotPredictions = oneHot(cast($predictions, 'int32'), numClasses);\n const oneHotLabelsT = transpose(oneHotLabels);\n return cast(matMul(oneHotLabelsT, oneHotPredictions), 'int32');\n}\nexport const confusionMatrix = op({ confusionMatrix_ });\n//# sourceMappingURL=confusion_matrix.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Exports under the tf.math.* namespace.\n */\nimport { confusionMatrix } from './ops/confusion_matrix';\nexport { confusionMatrix };\n//# sourceMappingURL=math.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
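`confusionMatrix_` above is just oneHot + transpose + matMul; replaying that composition by hand (with an explicit float cast before the matMul, which some backends prefer) reproduces its documented output:

```js
import * as tf from '@tensorflow/tfjs';

const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
const numClasses = 3;

// labelsᵀ · predictions over one-hot encodings counts how often actual
// class r was predicted as class c, i.e. the confusion matrix.
const oneHotLabels = tf.cast(tf.oneHot(labels, numClasses), 'float32');
const oneHotPreds = tf.cast(tf.oneHot(predictions, numClasses), 'float32');
tf.cast(tf.matMul(tf.transpose(oneHotLabels), oneHotPreds), 'int32').print();
// [[2, 0, 0],
//  [0, 1, 1],
//  [0, 0, 1]]
```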
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { assertNonNull } from '../util';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor3d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor3d(values, shape, dtype) {\n assertNonNull(values);\n if (shape != null && shape.length !== 3) {\n throw new Error('tensor3d() requires shape to have three numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 3 && inferredShape.length !== 1) {\n throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error('tensor3d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor3d.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
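`tensor3d` above accepts a flat TypedArray as long as a shape is supplied; a small HWC image example:

```js
import * as tf from '@tensorflow/tfjs';

// A 2x2 RGB image as a flat Uint8Array; the [height, width, channels]
// shape is mandatory for flat input, as the checks above enforce.
const rgb = new Uint8Array([
  255, 0, 0,   0, 255, 0,
  0, 0, 255,   255, 255, 255,
]);
tf.tensor3d(rgb, [2, 2, 3], 'int32').print();
```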
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { FromPixels } from '../kernel_names';\nimport { getKernel } from '../kernel_registry';\nimport { Tensor } from '../tensor';\nimport { convertToTensor } from '../tensor_util_env';\nimport { cast } from './cast';\nimport { op } from './operation';\nimport { tensor3d } from './tensor3d';\nlet fromPixels2DContext;\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(pixels, numChannels = 3) {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n if (pixels.data instanceof Uint8Array) {\n isPixelData = true;\n }\n else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n }\n else if (typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n }\n else if (typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n }\n else if (pixels.getContext != null) {\n isCanvasLike = true;\n }\n else {\n throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${pixels.constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (isVideo &&\n pixels.readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error('The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the