diff --git a/README.md b/README.md
index a54eebd..8480f07 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Forked from **face-api.js** version **0.22.2** released on March 22nd, 2020
 -
 -
 
-Currently based on **`TensorFlow/JS` 2.6.0**
+Currently based on **`TensorFlow/JS` 2.7.0**
 
 If you want to access `TFJS` classes and methods directly, they are exported as `faceapi.tf`
 ### Why?
@@ -43,7 +43,7 @@ Which means valid models are **tinyFaceDetector** and **mobileNetv1**
 ## Installation
 
 Face-API ships with several pre-build versions of the library:
-- `dist/face-api.js`: IIFE format for client-side Browser exeuction
+- `dist/face-api.js`: IIFE format for client-side Browser execution
 - `dist/face-api.esm.js`: ESM format for client-side Browser execution with TFJS pre-bundled
 - `dist/face-api.esm.nobundle.js`: ESM format for client-side Browser execution without TFJS and not minified
 - `dist/face-api.node.js`: CommonJS format for server-side NodeJS execution with TFJS pre-bundled
@@ -97,7 +97,7 @@ and then in your `index.js`
 ```
 or to use non-bundled version:
 ```js
-  import * as tf from `https://cdnjs.cloudflare.com/ajax/libs/tensorflow/2.6.0/tf.es2017.min.js`; // load tfjs directly from CDN link
+  import * as tf from `https://cdnjs.cloudflare.com/ajax/libs/tensorflow/2.7.0/tf.es2017.min.js`; // load tfjs directly from CDN link
   import * as faceapi from 'dist/face-api.nobundle.js';
 ```
 
diff --git a/build/package.json b/build/package.json
index dbcbc5c..0defcf9 100644
--- a/build/package.json
+++ b/build/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@vladmandic/face-api",
-  "version": "0.8.4",
+  "version": "0.8.5",
   "description": "JavaScript module for Face Detection and Face Recognition Using Tensorflow/JS",
   "main": "dist/face-api.node.js",
   "module": "dist/face-api.esm.js",
@@ -41,16 +41,15 @@
   },
   "homepage": "https://github.com/vladmandic/face-api#readme",
   "Dependencies": {},
-  "peerDependencies": {
-    "@tensorflow/tfjs": "^2.6.0"
-  },
   "devDependencies": {
     "@types/node": "^14.11.8",
     "esbuild": "^0.6.34",
     "rimraf": "^3.0.2",
     "ts-node": "^9.0.0",
     "tslib": "^2.0.3",
-    "typescript": "^4.1.0-dev.20201013",
-    "@tensorflow/tfjs": "^2.6.0"
+    "typescript": "^4.1.0-dev.20201013"
+  },
+  "dependencies": {
+    "@tensorflow/tfjs": "^2.7.0"
   }
 }
diff --git a/dist/face-api.esm.js b/dist/face-api.esm.js
index 930db2b..5bba14a 100644
--- a/dist/face-api.esm.js
+++ b/dist/face-api.esm.js
@@ -1,39 +1,39 @@
-var lm=Object.defineProperty,FX=Object.prototype.hasOwnProperty,lS=(r,l)=>()=>(l||(l={exports:{}},r(l.exports,l)),l.exports),LC=r=>lm(r,"__esModule",{value:!0}),hm=(r,l)=>{LC(r);for(var u in l)lm(r,u,{get:l[u],enumerable:!0})},_X=(r,l)=>{if(LC(r),typeof l=="object"||typeof l=="function")for(let u in l)!FX.call(r,u)&&u!=="default"&&lm(r,u,{get:()=>l[u],enumerable:!0});return r},Ke=r=>r&&r.__esModule?r:_X(lm({},"default",{value:r,enumerable:!0}),r);var IC=lS((mc,SC)=>{"use strict";var WX=function(){if(typeof self!="undefined")return self;if(typeof window!="undefined")return window;if(typeof ar!="undefined")return ar;throw new Error("unable to locate global object")},ar=WX();SC.exports=mc=ar.fetch;ar.fetch&&(mc.default=ar.fetch.bind(ar));mc.Headers=ar.Headers;mc.Request=ar.Request;mc.Response=ar.Response});var Xe=lS((um,xC)=>{(function(r,l){typeof um=="object"&&typeof xC!="undefined"?l(um):typeof define=="function"&&define.amd?define(["exports"],l):(r=r||self,l(r.tf=r.tf||{}))})(um,function(r){"use strict";const l=1e-7,u=1e-4;class p{constructor(e,t){this.backend=e,this.dataMover=t,this.data=new 
WeakMap,this.dataIdsCount=0}get(e){return this.data.has(e)||this.dataMover.moveData(this.backend,e),this.data.get(e)}set(e,t){this.dataIdsCount++,this.data.set(e,t)}has(e){return this.data.has(e)}delete(e){return this.dataIdsCount--,this.data.delete(e)}numDataIds(){return this.dataIdsCount}}class g{time(e){return f("time")}read(e){return f("read")}readSync(e){return f("readSync")}numDataIds(){return f("numDataIds")}disposeData(e){return f("disposeData")}write(e,t,n){return f("write")}move(e,t,n,s){return f("move")}memory(){return f("memory")}floatPrecision(){return f("floatPrecision")}epsilon(){return this.floatPrecision()===32?l:u}batchMatMul(e,t,n,s){return f("batchMatMul")}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){return f("fusedBatchMatMul")}slice(e,t,n){return f("slice")}stridedSlice(e,t,n,s){return f("stridedSlice")}unstack(e,t){return f("unstack")}reverse(e,t){return f("reverse")}concat(e,t){return f("concat")}neg(e){return f("neg")}add(e,t){return f("add")}addN(e){return f("addN")}subtract(e,t){return f("subtract")}multiply(e,t){return f("multiply")}realDivide(e,t){return f("realDivide")}floorDiv(e,t){return f("floorDiv")}sum(e,t){return f("sum")}prod(e,t){return f("prod")}unsortedSegmentSum(e,t,n){return f("unsortedSegmentSum")}argMin(e,t){return f("argMin")}argMax(e,t){return f("argMax")}equal(e,t){return f("equal")}notEqual(e,t){return f("notEqual")}less(e,t){return f("less")}lessEqual(e,t){return f("lessEqual")}greater(e,t){return f("greater")}greaterEqual(e,t){return f("greaterEqual")}logicalNot(e){return f("logicalNot")}logicalAnd(e,t){return f("logicalAnd")}logicalOr(e,t){return f("logicalOr")}where(e){return f("where")}select(e,t,n){return f("select")}topk(e,t,n){return f("topk")}min(e,t){return f("min")}minimum(e,t){return f("minimum")}mod(e,t){return f("mod")}max(e,t){return f("max")}maximum(e,t){return f("maximum")}all(e,t){return f("all")}any(e,t){return f("any")}squaredDifference(e,t){return f("squaredDifference")}ceil(e){return f("ceil")}floor(e){return f("floor")}round(e){return f("round")}sign(e){return f("sign")}isNaN(e){return f("isNaN")}isInf(e){return f("isInf")}isFinite(e){return f("isFinite")}pow(e,t){return f("pow")}exp(e){return f("exp")}expm1(e){return f("expm1")}softmax(e,t){return f("softmax")}log(e){return f("log")}log1p(e){return f("log1p")}sqrt(e){return f("sqrt")}rsqrt(e){return f("rsqrt")}square(e){return f("square")}reciprocal(e){return f("reciprocal")}relu(e){return f("relu")}relu6(e){return f("relu6")}prelu(e,t){return f("prelu")}elu(e){return f("elu")}eluDer(e,t){return f("eluDer")}selu(e){return f("selu")}int(e){return f("int")}clip(e,t,n){return f("clip")}abs(e){return f("abs")}complexAbs(e){return f("complexAbs")}sigmoid(e){return f("sigmoid")}softplus(e){return f("softplus")}sin(e){return f("sin")}cos(e){return f("cos")}tan(e){return f("tan")}asin(e){return f("asin")}acos(e){return f("acos")}atan(e){return f("atan")}atan2(e,t){return f("atan2")}sinh(e){return f("sinh")}cosh(e){return f("cosh")}tanh(e){return f("tanh")}asinh(e){return f("asinh")}acosh(e){return f("acosh")}atanh(e){return f("atanh")}erf(e){return f("erf")}step(e,t){return f("step")}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return f("fusedConv2d")}conv2d(e,t,n){return f("conv2d")}conv2dDerInput(e,t,n){return f("conv2dDerInput")}conv2dDerFilter(e,t,n){return 
f("conv2dDerFilter")}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return f("fusedDepthwiseConv2D")}depthwiseConv2D(e,t,n){return f("depthwiseConv2D")}depthwiseConv2DDerInput(e,t,n){return f("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(e,t,n){return f("depthwiseConv2DDerFilter")}conv3d(e,t,n){return f("conv3d")}conv3dDerInput(e,t,n){return f("conv3dDerInput")}conv3dDerFilter(e,t,n){return f("conv3dDerFilter")}maxPool(e,t){return f("maxPool")}maxPoolBackprop(e,t,n,s){return f("maxPoolBackprop")}avgPool(e,t){return f("avgPool")}avgPoolBackprop(e,t,n){return f("avgPoolBackprop")}avgPool3d(e,t){return f("avgPool3d")}avgPool3dBackprop(e,t,n){return f("avgPool3dBackprop")}maxPool3d(e,t){return f("maxPool3d")}maxPool3dBackprop(e,t,n,s){return f("maxPool3dBackprop")}reshape(e,t){return f("reshape")}cast(e,t){return f("cast")}tile(e,t){return f("tile")}pad(e,t,n){return f("pad")}transpose(e,t){return f("transpose")}gather(e,t,n){return f("gather")}gatherND(e,t){return f("gatherND")}scatterND(e,t,n){return f("scatterND")}batchToSpaceND(e,t,n){return f("batchToSpaceND")}spaceToBatchND(e,t,n){return f("spaceToBatchND")}resizeBilinear(e,t,n,s){return f("resizeBilinear")}resizeBilinearBackprop(e,t,n){return f("resizeBilinearBackprop")}resizeNearestNeighbor(e,t,n,s){return f("resizeNearestNeighbor")}resizeNearestNeighborBackprop(e,t,n){return f("resizeNearestNeighborBackprop")}batchNorm(e,t,n,s,i,o){return f("batchNorm")}localResponseNormalization4D(e,t,n,s,i){return f("localResponseNormalization4D")}LRNGrad(e,t,n,s,i,o,a){return f("LRNGrad")}multinomial(e,t,n,s){return f("multinomial")}oneHot(e,t,n,s){return f("oneHot")}cumsum(e,t,n,s){return f("cumsum")}nonMaxSuppression(e,t,n,s,i){return f("nonMaxSuppression")}fft(e){return f("fft")}ifft(e){return f("ifft")}complex(e,t){return f("complex")}real(e){return f("real")}imag(e){return f("imag")}cropAndResize(e,t,n,s,i,o){return f("cropAndResize")}depthToSpace(e,t,n){return f("depthToSpace")}split(e,t,n){return f("split")}sparseToDense(e,t,n,s){return f("sparseToDense")}diag(e){return f("diag")}fill(e,t,n){return f("fill")}onesLike(e){return f("onesLike")}zerosLike(e){return f("zerosLike")}linspace(e,t,n){return f("linspace")}dispose(){return f("dispose")}}function f(e){throw new Error(`'${e}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}const I="tfjsflags";class S{constructor(e){this.global=e,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(e,t){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${t}.`),this.platformName=e,this.platform=t}registerFlag(e,t,n){if(this.flagRegistry[e]={evaluationFn:t,setHook:n},this.urlFlags[e]!=null){const s=this.urlFlags[e];console.warn(`Setting feature override from URL ${e}: ${s}.`),this.set(e,s)}}async getAsync(e){return e in this.flags?this.flags[e]:(this.flags[e]=await this.evaluateFlag(e),this.flags[e])}get(e){if(e in this.flags)return this.flags[e];const t=this.evaluateFlag(e);if(t instanceof Promise)throw new Error(`Flag ${e} cannot be synchronously evaluated. 
Please use getAsync() instead.`);return this.flags[e]=t,this.flags[e]}getNumber(e){return this.get(e)}getBool(e){return this.get(e)}getFlags(){return this.flags}get features(){return this.flags}set(e,t){if(this.flagRegistry[e]==null)throw new Error(`Cannot set flag ${e} as it has not been registered.`);this.flags[e]=t,this.flagRegistry[e].setHook!=null&&this.flagRegistry[e].setHook(t)}evaluateFlag(e){if(this.flagRegistry[e]==null)throw new Error(`Cannot evaluate flag '${e}': no evaluation function found.`);return this.flagRegistry[e].evaluationFn()}setFlags(e){this.flags=Object.assign({},e)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const e=x(this.global.location.search);if(I in e){const t=e[I].split(",");t.forEach(n=>{const[s,i]=n.split(":");this.urlFlags[s]=O(s,i)})}}}function x(e){const t={};return e.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(n,...s)=>(v(t,s[0],s[1]),s.join("="))),t}function v(e,t,n){e[decodeURIComponent(t)]=decodeURIComponent(n||"")}function O(e,t){if(t=t.toLowerCase(),t==="true"||t==="false")return t==="true";if(`${+t}`===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${e}.`)}function C(){return r.ENV}r.ENV=null;function U(e){r.ENV=e}let G;function ne(){if(G==null){let e;if(typeof window!="undefined")e=window;else if(typeof global!="undefined")e=global;else if(typeof process!="undefined")e=process;else if(typeof self!="undefined")e=self;else throw new Error("Could not find a global object");G=e}return G}function te(){const e=ne();return e._tfGlobals==null&&(e._tfGlobals=new Map),e._tfGlobals}function oe(e,t){const n=te();if(n.has(e))return n.get(e);{const s=t();return n.set(e,s),n.get(e)}}const 
ge="Abs",fe="Acos",Ae="Acosh",Te="Add",Ve="AddN",rt="All",vt="Any",$t="ArgMax",Kt="ArgMin",Dn="Asin",Tn="Asinh",An="Atan",Ks="Atanh",Li="Atan2",Xs="AvgPool",ua="AvgPoolBackprop",Xc="AvgPool3D",ex="AvgPool3DBackprop",ag="BatchMatMul",cg="BatchToSpaceND",lg="BroadcastTo",Jc="Cast",Zc="Ceil",Qc="ClipByValue",hg="Complex",$u="Concat",ug="Conv2D",tx="Conv2DBackpropFilter",dg="Conv2DBackpropInput",pg="Conv3D",nx="Conv3DBackpropFilterV2",sx="Conv3DBackpropInputV2",da="Cos",el="Cosh",mg="Cumsum",ix="CropAndResize",rx="DepthToSpace",fg="DepthwiseConv2dNative",ox="DepthwiseConv2dNativeBackpropFilter",ax="DepthwiseConv2dNativeBackpropInput",cx="Diag",Uu="Dilation2D",Bu="Dilation2DBackpropInput",Mu="Dilation2DBackpropFilter",pa="Div",tl="Elu",lx="EluGrad",nl="Erf",hx="Equal",sl="Exp",il="Expm1",gg="FFT",ux="Fill",Pu="FlipLeftRight",rl="Floor",yg="FloorDiv",ol="FusedBatchNorm",bg="GatherV2",dx="GatherNd",px="Greater",wg="GreaterEqual",al="Identity",Lg="IFFT",Sg="Imag",cl="IsFinite",ll="IsInf",hl="IsNan",mx="Less",fx="LessEqual",gx="LinSpace",ul="Log",dl="Log1p",yx="LogicalAnd",zu="LogicalNot",bx="LogicalOr",Ig="LogSoftmax",xg="LRN",wx="LRNBackprop",pl="Max",Tg="Maximum",ml="MaxPool",Gu="MaxPoolBackprop",Ag="MaxPool3D",Lx="MaxPool3DBackprop",Vu="MaxPoolWithArgmax",cD="Mean",vg="Min",Ng="Minimum",Cg="Mod",fl="Multiply",Rg="Negate",Yu="NotEqual",Og="NonMaxSuppressionV3",Hu="NonMaxSuppressionV4",qu="NonMaxSuppressionV5",Eg="OnesLike",Dg="OneHot",ju="PadV2",lD="Pool",kg="Pow",Fg="Prelu",Sx="Prod",Ix="Range",_g="Real",gl="Reciprocal",Wg="Relu",yl="Reshape",$g="ResizeNearestNeighbor",xx="ResizeNearestNeighborGrad",Ug="ResizeBilinear",Tx="ResizeBilinearGrad",Bg="Relu6",Mg="Reverse",bl="Round",wl="Rsqrt",Ax="ScatterNd",Pg="SelectV2",Ll="Selu",Ku="Slice",ma="Sin",Sl="Sinh",Il="Sign",xl="Sigmoid",Tl="Softplus",Al="Sqrt",zg="Sum",Xu="SpaceToBatchND",Gg="SplitV",Vg="Softmax",fa="SquaredDifference",Ju="Square",vl="Sub",vx="SparseToDense",Nx="StridedSlice",ga="Tan",Nl="Tanh",Yg="Tile",Cx="TopK",Cl="Transpose",Zu="Unique",Hg="Unpack",qg="UnsortedSegmentSum",jg="ZerosLike",Rl="Step",Qu="FromPixels",ed="RotateWithOffset",Kg="_FusedMatMul",Xg="FusedConv2D",Jg="FusedDepthwiseConv2D";const ya=oe("kernelRegistry",()=>new Map),Ol=oe("gradRegistry",()=>new Map);function Zg(e,t){const n=ey(e,t);return ya.get(n)}function Qg(e){return Ol.get(e)}function td(e){const t=ya.entries(),n=[];for(;;){const{done:s,value:i}=t.next();if(s)break;const[o,a]=i,[c]=o.split("_");c===e&&n.push(a)}return n}function nd(e){const{kernelName:t,backendName:n}=e,s=ey(t,n);ya.has(s)&&console.warn(`The kernel '${t}' for backend '${n}' is already registered`),ya.set(s,e)}function Rx(e){const{kernelName:t}=e;Ol.has(t)&&(C().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`)),Ol.set(t,e)}function hD(e,t){const n=ey(e,t);if(!ya.has(n))throw new Error(`The kernel '${e}' for backend '${t}' is not registered`);ya.delete(n)}function uD(e){if(!Ol.has(e))throw new Error(`The gradient '${e}' for backend is not registered`);Ol.delete(e)}function dD(e,t){const n=td(e);n.forEach(s=>{const i=Object.assign({},s,{backendName:t});nd(i)})}function ey(e,t){return`${t}_${e}`}function ty(e){let t=e.length,n=0,s=0;for(;t>0;)s=Math.random()*t|0,t--,n=e[t],e[t]=e[s],e[s]=n}function El(e,t,n){return Math.max(e,Math.min(t,n))}function ny(e){return e%2===0?e:e+1}function Ox(e){let t=0;for(let n=0;nn+` Shapes ${e} and ${t} must match`)}function ao(e){k(e!=null,()=>"The input to the tensor constructor must be a non-null value.")}function 
Yi(e,t=[],n=!1){if(t==null&&(t=[]),Array.isArray(e)||wn(e)&&!n)for(let s=0;s0,n){return new Promise((s,i)=>{let o=0;const a=()=>{if(e()){s();return}o++;const c=t(o);if(n!=null&&o>=n){i();return}setTimeout(a,c)};a()})}function id(e,t){let n=1,s=-1;for(let o=0;o=0)n*=e[o];else if(e[o]===-1){if(s!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${s} and dim ${o}`);s=o}else if(e[o]<0)throw Error(`Shapes can not be < 0. Found ${e[o]} at dim ${o}`);if(s===-1){if(t>0&&t!==n)throw Error(`Size(${t}) must match the product of shape ${e}`);return e}if(n===0)throw Error(`Cannot infer the missing size in [${e}] when there are 0 elements`);if(t%n!==0)throw Error(`The implicit shape can't be a fractional number. Got ${t} / ${n}`);const i=e.slice();return i[s]=t/n,i}function ft(e,t){const n=t.length;return e=e==null?t.map((s,i)=>i):[].concat(e),k(e.every(s=>s>=-n&&s`All values in axis param must be in range [-${n}, ${n}) but got axis ${e}`),k(e.every(s=>Ut(s)),()=>`All values in axis param must be integers but got axis ${e}`),e.map(s=>s<0?n+s:s)}function Sr(e,t){const n=[],s=[],i=t!=null&&Array.isArray(t)&&t.length===0,o=t==null||i?null:ft(t,e).sort();let a=0;for(let c=0;cc)&&e[c]===1&&(n.push(e[c]),s.push(c)),o[a]<=c&&a++}e[c]!==1&&(n.push(e[c]),s.push(c))}return{newShape:n,keptDims:s}}function bn(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else throw new Error(`Unknown data type ${e}`);return n}function lo(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else if(e==="string")n=new Array(t);else throw new Error(`Unknown data type ${e}`);return n}function Ex(e,t){for(let n=0;nt+=n.length),t}function Ir(e){return typeof e=="string"||e instanceof String}function Fx(e){return typeof e=="boolean"}function rd(e){return typeof e=="number"}function ba(e){return Array.isArray(e)?ba(e[0]):e instanceof Float32Array?"float32":e instanceof Int32Array||e instanceof Uint8Array?"int32":rd(e)?"float32":Ir(e)?"string":Fx(e)?"bool":"float32"}function xr(e){return!!(e&&e.constructor&&e.call&&e.apply)}function od(e,t){for(let n=t;n=0;--s)n[s]=n[s+1]*e[s+1];return n}function _x(e,t){return t==="string"?ly(e):Tr([e],t)}function Tr(e,t){if(t==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(e)&&(e=Yi(e)),C().getBool("DEBUG")&&Ex(e,t),bD(e,t))return e;if(t==null||t==="float32"||t==="complex64")return new Float32Array(e);if(t==="int32")return new Int32Array(e);if(t==="bool"){const n=new Uint8Array(e.length);for(let s=0;sc*h);for(let c=0;cs*i);if(n===0)return[];if(n!==t.length)throw new Error(`[${e}] does not match the input size ${t.length}.`);return Wx(0,e,t)}function bD(e,t){return e instanceof Float32Array&&t==="float32"||e instanceof Int32Array&&t==="int32"||e instanceof Uint8Array&&t==="bool"}function oy(e,t){const n=wa(e,t);for(let s=0;ss*i,1);if(t==null||t==="float32")return ys(e,new Float32Array(n));if(t==="int32")return ys(e,new Int32Array(n));if(t==="bool")return ys(e,new Uint8Array(n));throw new Error(`Unknown data type ${t}`)}function Vn(){return C().platform.now()}function cy(e){e.forEach(t=>{k(Number.isInteger(t)&&t>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${e}].`)})}function $x(e,t){return C().platform.fetch(e,t)}function ly(e,t="utf-8"){return t=t||"utf-8",C().platform.encode(e,t)}function Dl(e,t="utf-8"){return 
t=t||"utf-8",C().platform.decode(e,t)}function Js(e,t,n){if(t===0)return 0;if(t===1)return e[0];let s=e[e.length-1];for(let i=0;i{s=n()},o=this.backendTimer.time(i);for(let c=0;c{SD(d,h.dtype,e)})}const a={kernelName:e,outputs:s,inputs:t,timeMs:o.then(c=>c.kernelMs),extraInfo:o.then(c=>c.getExtraProfileInfo!=null?c.getExtraProfileInfo():"")};return a}logKernelProfile(e){const{kernelName:t,outputs:n,timeMs:s,inputs:i,extraInfo:o}=e;n.forEach(a=>{Promise.all([a.data(),s,o]).then(c=>{this.logger.logKernelProfile(t,a,c[0],c[1],i,c[2])})})}}function SD(e,t,n){if(t!=="float32")return!1;for(let s=0;s0?L:""} `}}console.log(`%c${c} %c${a} %c${h}D ${m} %c${d} %c${y} %c${o}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function xD(e,t,n){const s={},i={};for(let h=0;hs[T.id]=!0),w=!0,i[d.id]=!0;break}if(w)break}}const o={};o[n.id]=!0;const a={};for(let h=e.length-1;h>=0;h--){const d=e[h],m=d.inputs;for(let y=0;y=0;i--){const o=t[i],a=[];if(o.outputs.forEach(h=>{const d=e[h.id];d!=null?a.push(d):a.push(null)}),o.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${o.kernelName}.`);const c=o.gradient(a);for(const h in o.inputs){if(!(h in c))throw new Error(`Cannot backprop through input ${h}. Available gradients found: ${Object.keys(c)}.`);const d=n(()=>c[h]());if(d.dtype!=="float32")throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input ${h} must have 'float32' dtype, but has '${d.dtype}'`);const m=o.inputs[h];if(!ot(d.shape,m.shape))throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input '${h}' has shape '${d.shape}', which does not match the shape of the input '${m.shape}'`);if(e[m.id]==null)e[m.id]=d;else{const y=e[m.id];e[m.id]=s(y,d),y.dispose()}}}}const Ux=20,kl=3,hy=7;function AD(e,t,n,s){const i=Ot(t),o=vD(e,t,n,i),a=t.length,c=ad(e,t,n,i,o),h=["Tensor"];return s&&(h.push(` dtype: ${n}`),h.push(` rank: ${a}`),h.push(` shape: [${t}]`),h.push(" values:")),h.push(c.map(d=>" "+d).join(` +var Mm=Object.defineProperty,LJ=Object.prototype.hasOwnProperty,ES=(r,l)=>()=>(l||(l={exports:{}},r(l.exports,l)),l.exports),XC=r=>Mm(r,"__esModule",{value:!0}),Pm=(r,l)=>{XC(r);for(var u in l)Mm(r,u,{get:l[u],enumerable:!0})},SJ=(r,l)=>{if(XC(r),typeof l=="object"||typeof l=="function")for(let u in l)!LJ.call(r,u)&&u!=="default"&&Mm(r,u,{get:()=>l[u],enumerable:!0});return r},Je=r=>r&&r.__esModule?r:SJ(Mm({},"default",{value:r,enumerable:!0}),r);var ZC=ES((Ac,JC)=>{"use strict";var IJ=function(){if(typeof self!="undefined")return self;if(typeof window!="undefined")return window;if(typeof mr!="undefined")return mr;throw new Error("unable to locate global object")},mr=IJ();JC.exports=Ac=mr.fetch;mr.fetch&&(Ac.default=mr.fetch.bind(mr));Ac.Headers=mr.Headers;Ac.Request=mr.Request;Ac.Response=mr.Response});var Ze=ES((zm,QC)=>{(function(r,l){typeof zm=="object"&&typeof QC!="undefined"?l(zm):typeof define=="function"&&define.amd?define(["exports"],l):(r=r||self,l(r.tf=r.tf||{}))})(zm,function(r){"use strict";const l=1e-7,u=1e-4;class p{constructor(e,t){this.backend=e,this.dataMover=t,this.data=new WeakMap,this.dataIdsCount=0}get(e){return this.data.has(e)||this.dataMover.moveData(this.backend,e),this.data.get(e)}set(e,t){this.dataIdsCount++,this.data.set(e,t)}has(e){return this.data.has(e)}delete(e){return this.dataIdsCount--,this.data.delete(e)}numDataIds(){return this.dataIdsCount}}class y{time(e){return g("time")}read(e){return g("read")}readSync(e){return 
g("readSync")}numDataIds(){return g("numDataIds")}disposeData(e){return g("disposeData")}write(e,t,n){return g("write")}move(e,t,n,s){return g("move")}memory(){return g("memory")}floatPrecision(){return g("floatPrecision")}epsilon(){return this.floatPrecision()===32?l:u}batchMatMul(e,t,n,s){return g("batchMatMul")}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){return g("fusedBatchMatMul")}slice(e,t,n){return g("slice")}stridedSlice(e,t,n,s){return g("stridedSlice")}unstack(e,t){return g("unstack")}reverse(e,t){return g("reverse")}concat(e,t){return g("concat")}neg(e){return g("neg")}add(e,t){return g("add")}addN(e){return g("addN")}subtract(e,t){return g("subtract")}multiply(e,t){return g("multiply")}realDivide(e,t){return g("realDivide")}floorDiv(e,t){return g("floorDiv")}sum(e,t){return g("sum")}prod(e,t){return g("prod")}unsortedSegmentSum(e,t,n){return g("unsortedSegmentSum")}argMin(e,t){return g("argMin")}argMax(e,t){return g("argMax")}equal(e,t){return g("equal")}notEqual(e,t){return g("notEqual")}less(e,t){return g("less")}lessEqual(e,t){return g("lessEqual")}greater(e,t){return g("greater")}greaterEqual(e,t){return g("greaterEqual")}logicalNot(e){return g("logicalNot")}logicalAnd(e,t){return g("logicalAnd")}logicalOr(e,t){return g("logicalOr")}where(e){return g("where")}select(e,t,n){return g("select")}topk(e,t,n){return g("topk")}min(e,t){return g("min")}minimum(e,t){return g("minimum")}mod(e,t){return g("mod")}max(e,t){return g("max")}maximum(e,t){return g("maximum")}all(e,t){return g("all")}any(e,t){return g("any")}squaredDifference(e,t){return g("squaredDifference")}ceil(e){return g("ceil")}floor(e){return g("floor")}round(e){return g("round")}sign(e){return g("sign")}isNaN(e){return g("isNaN")}isInf(e){return g("isInf")}isFinite(e){return g("isFinite")}pow(e,t){return g("pow")}exp(e){return g("exp")}expm1(e){return g("expm1")}softmax(e,t){return g("softmax")}log(e){return g("log")}log1p(e){return g("log1p")}sqrt(e){return g("sqrt")}rsqrt(e){return g("rsqrt")}square(e){return g("square")}reciprocal(e){return g("reciprocal")}relu(e){return g("relu")}relu6(e){return g("relu6")}prelu(e,t){return g("prelu")}elu(e){return g("elu")}eluDer(e,t){return g("eluDer")}selu(e){return g("selu")}int(e){return g("int")}clip(e,t,n){return g("clip")}abs(e){return g("abs")}complexAbs(e){return g("complexAbs")}sigmoid(e){return g("sigmoid")}softplus(e){return g("softplus")}sin(e){return g("sin")}cos(e){return g("cos")}tan(e){return g("tan")}asin(e){return g("asin")}acos(e){return g("acos")}atan(e){return g("atan")}atan2(e,t){return g("atan2")}sinh(e){return g("sinh")}cosh(e){return g("cosh")}tanh(e){return g("tanh")}asinh(e){return g("asinh")}acosh(e){return g("acosh")}atanh(e){return g("atanh")}erf(e){return g("erf")}step(e,t){return g("step")}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedConv2d")}conv2d(e,t,n){return g("conv2d")}conv2dDerInput(e,t,n){return g("conv2dDerInput")}conv2dDerFilter(e,t,n){return g("conv2dDerFilter")}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedDepthwiseConv2D")}depthwiseConv2D(e,t,n){return g("depthwiseConv2D")}depthwiseConv2DDerInput(e,t,n){return g("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(e,t,n){return g("depthwiseConv2DDerFilter")}conv3d(e,t,n){return g("conv3d")}conv3dDerInput(e,t,n){return g("conv3dDerInput")}conv3dDerFilter(e,t,n){return g("conv3dDerFilter")}maxPool(e,t){return 
g("maxPool")}maxPoolBackprop(e,t,n,s){return g("maxPoolBackprop")}avgPool(e,t){return g("avgPool")}avgPoolBackprop(e,t,n){return g("avgPoolBackprop")}avgPool3d(e,t){return g("avgPool3d")}avgPool3dBackprop(e,t,n){return g("avgPool3dBackprop")}maxPool3d(e,t){return g("maxPool3d")}maxPool3dBackprop(e,t,n,s){return g("maxPool3dBackprop")}reshape(e,t){return g("reshape")}cast(e,t){return g("cast")}tile(e,t){return g("tile")}pad(e,t,n){return g("pad")}transpose(e,t){return g("transpose")}gather(e,t,n){return g("gather")}gatherND(e,t){return g("gatherND")}scatterND(e,t,n){return g("scatterND")}batchToSpaceND(e,t,n){return g("batchToSpaceND")}spaceToBatchND(e,t,n){return g("spaceToBatchND")}resizeBilinear(e,t,n,s){return g("resizeBilinear")}resizeBilinearBackprop(e,t,n){return g("resizeBilinearBackprop")}resizeNearestNeighbor(e,t,n,s){return g("resizeNearestNeighbor")}resizeNearestNeighborBackprop(e,t,n){return g("resizeNearestNeighborBackprop")}batchNorm(e,t,n,s,i,o){return g("batchNorm")}localResponseNormalization4D(e,t,n,s,i){return g("localResponseNormalization4D")}LRNGrad(e,t,n,s,i,o,a){return g("LRNGrad")}multinomial(e,t,n,s){return g("multinomial")}oneHot(e,t,n,s){return g("oneHot")}cumsum(e,t,n,s){return g("cumsum")}nonMaxSuppression(e,t,n,s,i){return g("nonMaxSuppression")}fft(e){return g("fft")}ifft(e){return g("ifft")}complex(e,t){return g("complex")}real(e){return g("real")}imag(e){return g("imag")}cropAndResize(e,t,n,s,i,o){return g("cropAndResize")}depthToSpace(e,t,n){return g("depthToSpace")}split(e,t,n){return g("split")}sparseToDense(e,t,n,s){return g("sparseToDense")}diag(e){return g("diag")}fill(e,t,n){return g("fill")}onesLike(e){return g("onesLike")}zerosLike(e){return g("zerosLike")}linspace(e,t,n){return g("linspace")}dispose(){return g("dispose")}}function g(e){throw new Error(`'${e}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function I(e){let t=e.length,n=0,s=0;for(;t>0;)s=Math.random()*t|0,t--,n=e[t],e[t]=e[s],e[s]=n}function S(e,t,n){return Math.max(e,Math.min(t,n))}function T(e){return e%2===0?e:e+1}function C(e){let t=0;for(let n=0;nn+` Shapes ${e} and ${t} must match`)}function ne(e){A(e!=null,()=>"The input to the tensor constructor must be a non-null value.")}function te(e,t=[],n=!1){if(t==null&&(t=[]),Array.isArray(e)||hn(e)&&!n)for(let s=0;s0,n){return new Promise((s,i)=>{let o=0;const a=()=>{if(e()){s();return}o++;const c=t(o);if(n!=null&&o>=n){i();return}setTimeout(a,c)};a()})}function Vt(e,t){let n=1,s=-1;for(let o=0;o=0)n*=e[o];else if(e[o]===-1){if(s!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${s} and dim ${o}`);s=o}else if(e[o]<0)throw Error(`Shapes can not be < 0. Found ${e[o]} at dim ${o}`);if(s===-1){if(t>0&&t!==n)throw Error(`Size(${t}) must match the product of shape ${e}`);return e}if(n===0)throw Error(`Cannot infer the missing size in [${e}] when there are 0 elements`);if(t%n!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${t} / ${n}`);const i=e.slice();return i[s]=t/n,i}function qe(e,t){const n=t.length;return e=e==null?t.map((s,i)=>i):[].concat(e),A(e.every(s=>s>=-n&&s`All values in axis param must be in range [-${n}, ${n}) but got axis ${e}`),A(e.every(s=>Le(s)),()=>`All values in axis param must be integers but got axis ${e}`),e.map(s=>s<0?n+s:s)}function ln(e,t){const n=[],s=[],i=t!=null&&Array.isArray(t)&&t.length===0,o=t==null||i?null:qe(t,e).sort();let a=0;for(let c=0;cc)&&e[c]===1&&(n.push(e[c]),s.push(c)),o[a]<=c&&a++}e[c]!==1&&(n.push(e[c]),s.push(c))}return{newShape:n,keptDims:s}}function bt(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else throw new Error(`Unknown data type ${e}`);return n}function ws(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else if(e==="string")n=new Array(t);else throw new Error(`Unknown data type ${e}`);return n}function Nr(e,t){for(let n=0;nt+=n.length),t}function Yi(e){return typeof e=="string"||e instanceof String}function xx(e){return typeof e=="boolean"}function Qu(e){return typeof e=="number"}function wa(e){return Array.isArray(e)?wa(e[0]):e instanceof Float32Array?"float32":e instanceof Int32Array||e instanceof Uint8Array?"int32":Qu(e)?"float32":Yi(e)?"string":xx(e)?"bool":"float32"}function Rr(e){return!!(e&&e.constructor&&e.call&&e.apply)}function ed(e,t){for(let n=t;n=0;--s)n[s]=n[s+1]*e[s+1];return n}function Tx(e,t,n){const s=new Array;if(t.length===1){const i=t[0];for(let o=0;oc*h);for(let c=0;cs*i);if(n===0)return[];if(n!==t.length)throw new Error(`[${e}] does not match the input size ${t.length}.`);return Tx(0,e,t)}function Mg(e,t){const n=La(e,t);for(let s=0;ss*i,1);if(t==null||t==="float32")return Ls(e,new Float32Array(n));if(t==="int32")return Ls(e,new Int32Array(n));if(t==="bool")return Ls(e,new Uint8Array(n));throw new Error(`Unknown data type ${t}`)}function zg(e){e.forEach(t=>{A(Number.isInteger(t)&&t>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${e}].`)})}function _s(e,t,n){if(t===0)return 0;if(t===1)return e[0];let s=e[e.length-1];for(let i=0;i{const[s,i]=n.split(":");this.urlFlags[s]=GD(s,i)})}}}function zD(e){const t={};return e.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(n,...s)=>(VD(t,s[0],s[1]),s.join("="))),t}function VD(e,t,n){e[decodeURIComponent(t)]=decodeURIComponent(n||"")}function GD(e,t){if(t=t.toLowerCase(),t==="true"||t==="false")return t==="true";if(`${+t}`===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${e}.`)}function oe(){return r.ENV}r.ENV=null;function YD(e){r.ENV=e}let Vg;function Nx(){if(Vg==null){let e;if(typeof window!="undefined")e=window;else if(typeof global!="undefined")e=global;else if(typeof process!="undefined")e=process;else if(typeof self!="undefined")e=self;else throw new Error("Could not find a global object");Vg=e}return Vg}function HD(){const e=Nx();return e._tfGlobals==null&&(e._tfGlobals=new Map),e._tfGlobals}function Cx(e,t){const n=HD();if(n.has(e))return n.get(e);{const s=t();return n.set(e,s),n.get(e)}}const 
td="Abs",ol="Acos",al="Acosh",wo="Add",Gg="AddN",Rx="All",Ox="Any",Yg="ArgMax",Hg="ArgMin",cl="Asin",ll="Asinh",hl="Atan",ul="Atanh",nd="Atan2",dl="AvgPool",sd="AvgPoolBackprop",qg="AvgPool3D",Ex="AvgPool3DBackprop",id="BatchMatMul",jg="BatchToSpaceND",Kg="BroadcastTo",Sa="Cast",pl="Ceil",ml="ClipByValue",rd="Complex",fl="Concat",od="Conv2D",Xg="Conv2DBackpropFilter",ad="Conv2DBackpropInput",cd="Conv3D",Jg="Conv3DBackpropFilterV2",Zg="Conv3DBackpropInputV2",Ia="Cos",gl="Cosh",Qg="Cumsum",Dx="CropAndResize",kx="DepthToSpace",ld="DepthwiseConv2dNative",ey="DepthwiseConv2dNativeBackpropFilter",ty="DepthwiseConv2dNativeBackpropInput",Fx="Diag",hd="Dilation2D",ud="Dilation2DBackpropInput",dd="Dilation2DBackpropFilter",xa="Div",yl="Elu",_x="EluGrad",bl="Erf",Wx="Equal",wl="Exp",Ll="Expm1",pd="FFT",ny="Fill",md="FlipLeftRight",Sl="Floor",sy="FloorDiv",Il="FusedBatchNorm",iy="GatherV2",$x="GatherNd",Ux="Greater",ry="GreaterEqual",xl="Identity",fd="IFFT",gd="Imag",Tl="IsFinite",Al="IsInf",vl="IsNan",Bx="Less",Mx="LessEqual",Px="LinSpace",Nl="Log",Cl="Log1p",zx="LogicalAnd",yd="LogicalNot",Vx="LogicalOr",oy="LogSoftmax",ay="LRN",Gx="LRNBackprop",Rl="Max",cy="Maximum",Ol="MaxPool",bd="MaxPoolBackprop",ly="MaxPool3D",Yx="MaxPool3DBackprop",wd="MaxPoolWithArgmax",hy="Mean",uy="Min",dy="Minimum",El="MirrorPad",py="Mod",Ta="Multiply",my="Negate",Dl="NotEqual",fy="NonMaxSuppressionV3",Ld="NonMaxSuppressionV4",Sd="NonMaxSuppressionV5",gy="OnesLike",yy="OneHot",Id="PadV2",qD="Pool",by="Pow",xd="Prelu",Hx="Prod",qx="Range",Td="Real",kl="Reciprocal",Fl="Relu",_l="Reshape",wy="ResizeNearestNeighbor",jx="ResizeNearestNeighborGrad",Ly="ResizeBilinear",Kx="ResizeBilinearGrad",Wl="Relu6",Sy="Reverse",$l="Round",Ul="Rsqrt",Xx="ScatterNd",Iy="SelectV2",Bl="Selu",Ad="Slice",Aa="Sin",Ml="Sinh",Pl="Sign",zl="Sigmoid",Vl="Softplus",Gl="Sqrt",xy="Sum",vd="SpaceToBatchND",Ty="SplitV",Ay="Softmax",va="SquaredDifference",Nd="Square",Na="Sub",Jx="SparseToDense",Zx="StridedSlice",Ca="Tan",Yl="Tanh",vy="Tile",Qx="TopK",Hl="Transpose",Cd="Unique",Ny="Unpack",Cy="UnsortedSegmentSum",Ry="ZerosLike",ql="Step",Rd="FromPixels",Od="RotateWithOffset",Ed="_FusedMatMul",Dd="FusedConv2D",kd="FusedDepthwiseConv2D";const Ra=Cx("kernelRegistry",()=>new Map),jl=Cx("gradRegistry",()=>new Map);function Oy(e,t){const n=Dy(e,t);return Ra.get(n)}function Ey(e){return jl.get(e)}function Fd(e){const t=Ra.entries(),n=[];for(;;){const{done:s,value:i}=t.next();if(s)break;const[o,a]=i,[c]=o.split("_");c===e&&n.push(a)}return n}function _d(e){const{kernelName:t,backendName:n}=e,s=Dy(t,n);Ra.has(s)&&console.warn(`The kernel '${t}' for backend '${n}' is already registered`),Ra.set(s,e)}function eT(e){const{kernelName:t}=e;jl.has(t)&&(oe().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`)),jl.set(t,e)}function jD(e,t){const n=Dy(e,t);if(!Ra.has(n))throw new Error(`The kernel '${e}' for backend '${t}' is not registered`);Ra.delete(n)}function KD(e){if(!jl.has(e))throw new Error(`The gradient '${e}' for backend is not registered`);jl.delete(e)}function XD(e,t){const n=Fd(e);n.forEach(s=>{const i=Object.assign({},s,{backendName:t});_d(i)})}function Dy(e,t){return`${t}_${e}`}function tT(e,t){return t==="string"?Wd(e):Or([e],t)}function JD(e,t){return e instanceof Float32Array&&t==="float32"||e instanceof Int32Array&&t==="int32"||e instanceof Uint8Array&&t==="bool"}function Or(e,t){if(t==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(e)&&(e=te(e)),oe().getBool("DEBUG")&&Nr(e,t),JD(e,t))return 
e;if(t==null||t==="float32"||t==="complex64")return new Float32Array(e);if(t==="int32")return new Int32Array(e);if(t==="bool"){const n=new Uint8Array(e.length);for(let s=0;s{s=n()},o=this.backendTimer.time(i);for(let c=0;c{ek(d,h.dtype,e)})}const a={kernelName:e,outputs:s,inputs:t,timeMs:o.then(c=>c.kernelMs),extraInfo:o.then(c=>c.getExtraProfileInfo!=null?c.getExtraProfileInfo():"")};return a}logKernelProfile(e){const{kernelName:t,outputs:n,timeMs:s,inputs:i,extraInfo:o}=e;n.forEach(a=>{Promise.all([a.data(),s,o]).then(c=>{this.logger.logKernelProfile(t,a,c[0],c[1],i,c[2])})})}}function ek(e,t,n){if(t!=="float32")return!1;for(let s=0;s0?L:""} `}}console.log(`%c${c} %c${a} %c${h}D ${m} %c${d} %c${f} %c${o}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function nk(e,t,n){const s={},i={};for(let h=0;hs[x.id]=!0),w=!0,i[d.id]=!0;break}if(w)break}}const o={};o[n.id]=!0;const a={};for(let h=e.length-1;h>=0;h--){const d=e[h],m=d.inputs;for(let f=0;f=0;i--){const o=t[i],a=[];if(o.outputs.forEach(h=>{const d=e[h.id];d!=null?a.push(d):a.push(null)}),o.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${o.kernelName}.`);const c=o.gradient(a);for(const h in o.inputs){if(!(h in c))throw new Error(`Cannot backprop through input ${h}. Available gradients found: ${Object.keys(c)}.`);const d=n(()=>c[h]());if(d.dtype!=="float32")throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input ${h} must have 'float32' dtype, but has '${d.dtype}'`);const m=o.inputs[h];if(!ae(d.shape,m.shape))throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input '${h}' has shape '${d.shape}', which does not match the shape of the input '${m.shape}'`);if(e[m.id]==null)e[m.id]=d;else{const f=e[m.id];e[m.id]=s(f,d),f.dispose()}}}}const sT=20,Xl=3,ky=7;function ik(e,t,n,s){const i=je(t),o=rk(e,t,n,i),a=t.length,c=$d(e,t,n,i,o),h=["Tensor"];return s&&(h.push(` dtype: ${n}`),h.push(` rank: ${a}`),h.push(` shape: [${t}]`),h.push(" values:")),h.push(c.map(d=>" "+d).join(` `)),h.join(` -`)}function vD(e,t,n,s){const i=we(t),o=s[s.length-1],a=new Array(o).fill(0),c=t.length,h=n==="complex64"?_l(e):e;if(c>1)for(let d=0;dUx){const A=kl*a;let N=Array.from(e.slice(0,A)),E=Array.from(e.slice((c-kl)*a,c*a));return n==="complex64"&&(N=_l(N),E=_l(E)),["["+N.map((D,F)=>Fl(D,i[F],n)).join(", ")+", ..., "+E.map((D,F)=>Fl(D,i[c-kl+F],n)).join(", ")+"]"]}const T=n==="complex64"?_l(e):Array.from(e);return["["+T.map((A,N)=>Fl(A,i[N],n)).join(", ")+"]"]}const d=t.slice(1),m=s.slice(1),y=s[0]*a,b=[];if(c>Ux){for(let T=0;T`Length of values '${s}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||lo(t,this.size),this.strides=Ot(e)}set(e,...t){t.length===0&&(t=[0]),k(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const s of e){if(s<0||s>=this.shape[t]){const i=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(i)}t++}let n=e[e.length-1];for(let s=0;sDl(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=Si().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>Dl(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await Si().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;Si().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return Sa.print(this,e)}clone(){return this.throwIfDisposed(),Sa.clone(this)}toString(e=!1){const t=this.dataSync();return AD(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),Sa.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),Si().makeVariable(this,e,t,n)}}Object.defineProperty(Q,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class Wl extends Q{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!ot(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);Si().disposeTensor(this),this.dataId=e.dataId,Si().incRef(this,null)}dispose(){Si().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Wl,Symbol.hasInstance,{value:e=>e instanceof Q&&e.assign!=null&&e.assign instanceof Function});(function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"})(r.Rank||(r.Rank={}));var uy;(function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"})(uy||(uy={}));var dy;(function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"})(dy||(dy={}));var py;(function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"})(py||(py={}));var my;(function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"})(my||(my={}));const OD={float32:py,int32:uy,bool:dy,complex64:my};function vn(e,t){if(e==="string"||t==="string"){if(e==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return OD[e][t]}function cd(e){return vn(e,"int32")}function Bt(e,t){if(e.dtype===t.dtype)return[e,t];const n=vn(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function Px(e,t){k(e.dtype===t.dtype,()=>`The dtypes of the first(${e.dtype}) and second(${t.dtype}) input must match`)}function ld(e,t){return t.some(n=>n.id===e.id)}function Hi(e){const t=[],n=new Set;return zx(e,t,n),t}function zx(e,t,n){if(e==null)return;if(e instanceof Q){t.push(e);return}if(!ED(e))return;const s=e;for(const i in s){const o=s[i];n.has(o)||(n.add(o),zx(o,t,n))}}function ED(e){return Array.isArray(e)||typeof e=="object"}var DD=Object.freeze({__proto__:null,makeTypesMatch:Bt,assertTypesMatch:Px,isTensorInList:ld,getTensorsInContainer:Hi});class Gx{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new 
WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class $l{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new Gx}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const e=this.getSortedBackends();for(let t=0;t{t.setupFunc!=null&&t.setupFunc(this.backendInstance)})}disposeRegisteredKernels(e){const t=td(e);t.forEach(n=>{n.disposeFunc!=null&&n.disposeFunc(this.registry[e])})}initializeBackend(e){const t=this.registryFactory[e];if(t==null)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(n&&!(n instanceof g)&&typeof n.then=="function"){const s=++this.pendingBackendInitId,i=n.then(o=>s(sthis.registryFactory[t].priority-this.registryFactory[e].priority)}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;tthis.startScope(n),()=>this.endScope(s),()=>(s=t(),s instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),s))}scopedRun(e,t,n){e();try{const s=n();return t(),s}catch(s){throw t(),s}}nextTensorId(){return $l.nextTensorId++}nextVariableId(){return $l.nextVariableId++}clone(e){const t=this.makeTensorFromDataId(e.dataId,e.shape,e.dtype),n={x:e},s=o=>({x:()=>{const a="float32",c={x:o},h={dtype:a};return V.runKernelFunc(d=>d.cast(o,a),c,null,Jc,h)}}),i=[];return this.addTapeNode(this.state.activeScope.name,n,[t],s,i,{}),t}runKernel(e,t,n,s,i){const o=null,a=null;return this.runKernelFunc(o,t,a,e,n,s,i)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let i=0;n.forEach(c=>{i+=c.dtype==="complex64"?3:1});const o=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],a=s-t-i-o;if(a>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${a} data ids) after running '${e}'`)}runKernelFunc(e,t,n,s,i,o,a){let c,h=[];const d=this.isTapeOn();s==null&&(s=this.state.activeScope!=null?this.state.activeScope.name:"");const m=this.state.numBytes,y=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let b;const w=Zg(s,this.backendName);let L;if(w!=null)b=()=>{const A=this.backend.numDataIds();L=w.kernelFunc({inputs:t,attrs:i,backend:this.backend});const N=Array.isArray(L)?L:[L];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,A,N);const E=N.map(({dataId:D,shape:F,dtype:_})=>this.makeTensorFromDataId(D,F,_));if(d){let D=this.getTensorsForGradient(s,t,E);if(D==null){a==null&&(a=[]);const F=E.filter((_,B)=>a[B]);D=(o||[]).slice().concat(F)}h=this.saveTensorsForBackwardMode(D)}return E};else{const A=N=>{if(!d)return;h=N.map(E=>this.keep(this.clone(E)))};b=()=>{const N=this.backend.numDataIds();L=this.tidy(()=>e(this.backend,A));const E=Array.isArray(L)?L:[L];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,N,E),E}}let T;return 
this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?c=b():(T=this.profiler.profileKernel(s,t,()=>b()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(T),c=T.outputs)}),d&&this.addTapeNode(s,t,c,n,h,i),this.state.profiling&&this.state.activeProfile.kernels.push({name:s,bytesAdded:this.state.numBytes-m,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-y,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(t).map(A=>t[A]!=null?t[A].shape:null),outputShapes:c.map(A=>A.shape),kernelTimeMs:T.timeMs,extraInfo:T.extraInfo}),Array.isArray(L)?c:c[0]}saveTensorsForBackwardMode(e){const t=e.map(n=>this.keep(this.clone(n)));return t}getTensorsForGradient(e,t,n){const s=Qg(e);if(s!=null){const i=s.inputsToSave||[],o=s.outputsToSave||[];let a;s.saveAllInputs?(k(Array.isArray(t),()=>"saveAllInputs is true, expected inputs to be an array."),a=Object.keys(t).map(h=>t[h])):a=i.map(h=>t[h]);const c=n.filter((h,d)=>o[d]);return a.concat(c)}return null}makeTensor(e,t,n,s){if(e==null)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let i=e;n==="string"&&Ir(e[0])&&(i=e.map(c=>ly(c)));const o=s.write(i,t,n),a=new Q(t,n,o,this.nextTensorId());if(this.incRef(a,s),n==="string"){const c=this.state.tensorInfo.get(o),h=kx(i);this.state.numBytes+=h-c.bytes,c.bytes=h}return a}makeTensorFromDataId(e,t,n,s){n=n||"float32";const i=new Q(t,n,e,this.nextTensorId());return this.incRef(i,s),i}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),s!=null&&s!==e.dtype&&(e=e.cast(s));const i=new Wl(e,t,n,this.nextTensorId());if(this.state.registeredVariables[i.name]!=null)throw new Error(`Variable with name ${i.name} was already registered`);return this.state.registeredVariables[i.name]=i,this.incRef(i,this.backend),i}incRef(e,t){const n=this.state.tensorInfo.has(e.dataId)?this.state.tensorInfo.get(e.dataId).refCount:0;if(this.state.numTensors++,e.dtype==="string"&&this.state.numStringTensors++,n===0){this.state.numDataBuffers++;let s=0;e.dtype!=="complex64"&&e.dtype!=="string"&&(s=e.size*ry(e.dtype)),this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:s,refCount:0}),this.state.numBytes+=s}this.state.tensorInfo.get(e.dataId).refCount++,e instanceof Wl||this.track(e)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;this.state.numTensors--,e.dtype==="string"&&this.state.numStringTensors--;const t=this.state.tensorInfo.get(e.dataId),n=t.refCount;n<=1?(e.dtype!=="complex64"&&(this.state.numBytes-=t.bytes),this.state.numDataBuffers--,t.backend.disposeData(e.dataId),this.state.tensorInfo.delete(e.dataId)):this.state.tensorInfo.get(e.dataId).refCount--}disposeVariables(){for(const e in this.state.registeredVariables){const t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),this.state.registeredVariables[e.name]!=null&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,e.reasons==null&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await 
e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(s=>s.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const s of this.state.activeProfile.kernels)s.kernelTimeMs=await s.kernelTimeMs,s.extraInfo=await s.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(e,t,n,s,i,o){const a={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:i},c=Qg(e);c!=null&&(s=c.gradFunc),s!=null&&(a.gradient=h=>(h=h.map((d,m)=>{if(d==null){const y=n[m],b=wa(y.size,y.dtype);return this.makeTensor(b,y.shape,y.dtype)}return d}),s(h.length>1?h:h[0],i,o))),this.state.activeTape.push(a)}keep(e){return e.kept=!0,e}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=Hi(e),n=new Set(t.map(i=>i.id));for(let i=0;i{!i.kept&&i.scopeId===s.id&&this.track(i)})}gradients(e,t,n,s=!1){if(k(t.length>0,()=>"gradients() received an empty list of xs."),n!=null&&n.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const i=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",e));k(i instanceof Q,()=>"The result y returned by f() must be a tensor.");const o=xD(this.state.activeTape,t,i);if(!s&&o.length===0&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const a={};a[i.id]=n==null?kD(i.shape):n,TD(a,o,h=>this.tidy(h),FD);const c=t.map(h=>a[h.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(h=>{for(const d of h.saved)d.dispose()}),this.state.activeTape=null),{value:i,grads:c}})}customGrad(e){return k(xr(e),()=>"The f passed in customGrad(f) must be a function."),(...t)=>{k(t.every(i=>i instanceof Q),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let n;const s={};return t.forEach((i,o)=>{s[o]=i}),this.runKernelFunc((i,o)=>(n=e(...t,o),k(n.value instanceof Q,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),k(xr(n.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),n.value),s,(i,o)=>{const a=n.gradFunc(i,o),c=Array.isArray(a)?a:[a];k(c.length===t.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),k(c.every(d=>d instanceof Q),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const h={};return c.forEach((d,m)=>{h[m]=()=>d}),h})}}readSync(e){const t=this.state.tensorInfo.get(e);return t.backend.readSync(e)}read(e){const t=this.state.tensorInfo.get(e);return t.backend.read(e)}async time(e){const t=Vn(),n=await this.backend.time(e);return n.wallMs=Vn()-t,n}track(e){return this.state.activeScope!=null&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new Gx;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}$l.nextTensorId=0,$l.nextVariableId=0;function kD(e){const t=oy(we(e),"float32");return V.makeTensor(t,e,"float32")}function Vx(){const e=ne();if(e._tfengine==null){const t=new S(e);e._tfengine=new $l(t)}return U(e._tfengine.ENV),ND(()=>e._tfengine),e._tfengine}const V=Vx();function FD(e,t){const n={a:e,b:t};return V.runKernelFunc((s,i)=>{const o=s.add(e,t);return i([e,t]),o},n,null,Te)}function _D(){return typeof navigator!="undefined"&&navigator!=null}function Yx(){if(_D()){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
)|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}return!1}function fy(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var WD=Object.freeze({__proto__:null,isMobile:Yx,isBrowser:fy});const qi=C();qi.registerFlag("DEBUG",()=>!1,e=>{e&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")}),qi.registerFlag("IS_BROWSER",()=>fy()),qi.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined"),qi.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)),qi.registerFlag("PROD",()=>!1),qi.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>qi.getBool("DEBUG")),qi.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0),qi.registerFlag("IS_TEST",()=>!1);function Ii(e,t){let n=e;if(wn(e))return t==="string"?[]:[e.length];if(!Array.isArray(e))return[];const s=[];for(;Array.isArray(n)||wn(n)&&t!=="string";)s.push(n.length),n=n[0];return Array.isArray(e)&&C().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&Hx(e,s,[]),s}function Hx(e,t,n){if(n=n||[],!Array.isArray(e)&&!wn(e)){k(t.length===0,()=>`Element arr[${n.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`);return}k(t.length>0,()=>`Element arr[${n.join("][")}] should be a primitive, but is an array of ${e.length} elements`),k(e.length===t[0],()=>`Element arr[${n.join("][")}] should have ${t[0]} elements, but has ${e.length} elements`);const s=t.slice(1);for(let i=0;i=0&&(i=s),qx(s,i,t,n),e==null||!wn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string"){const h=e==null?"null":e.constructor.name;throw new Error(`Argument '${t}' passed to '${n}' must be a Tensor or TensorLike, but got '${h}'`)}const o=Ii(e,i);!wn(e)&&!Array.isArray(e)&&(e=[e]);const a=!0,c=i!=="string"?Tr(e,i):Yi(e,[],a);return V.makeTensor(c,o,i)}function Ul(e,t,n,s="numeric"){if(!Array.isArray(e))throw new Error(`Argument ${t} passed to ${n} must be a \`Tensor[]\` or \`TensorLike[]\``);const i=e;return i.map((o,a)=>W(o,`${t}[${a}]`,n),s)}const jx="__op";function P(e){const t=Object.keys(e);if(t.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let n=t[0];const s=e[n];n.endsWith("_")&&(n=n.substring(0,n.length-1)),n=n+jx;const i=(...o)=>{V.startScope(n);try{const a=s(...o);return a instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),V.endScope(a),a}catch(a){throw V.endScope(null),a}};return Object.defineProperty(i,"name",{value:n,configurable:!0}),i}function $D(e,t){const n=W(e,"real","complex"),s=W(t,"imag","complex");dt(n.shape,s.shape,`real and imag shapes, ${n.shape} and ${s.shape}, must match in call to tf.complex().`);const i=a=>a.complex(n,s),o={real:n,imag:s};return V.runKernelFunc(i,o,null,hg)}const xi=P({complex_:$D});function vr(e,t,n,s){if(s==null&&(s=ba(e)),s==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!wn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(t!=null){cy(t);const i=we(t),o=we(n);k(i===o,()=>`Based on the provided shape, [${t}], the tensor should have ${i} values but has ${o}`);for(let a=0;a`Error creating a new Tensor. Inferred shape (${n}) does not match the provided shape (${t}). `)}}return!wn(e)&&!Array.isArray(e)&&(e=[e]),t=t||n,e=s!=="string"?Tr(e,s):Yi(e,[],!0),V.makeTensor(e,t,s)}function en(e,t,n){const s=Ii(e,n);return vr(e,t,s,n)}const gy={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};const hd=4;async function yy(e,t){const n=[],s=[],i=Array.isArray(e)?e.map(a=>a.name):Object.keys(e);for(let a=0;a{const b=await h.bytes(),w=b.reduce((A,N)=>A+N.length,0)+hd*b.length,L=new Uint8Array(w);let T=0;for(let A=0;A{if(t+=o.byteLength,n.push(o.byteLength===o.buffer.byteLength?o:new o.constructor(o)),!(o instanceof Float32Array||o instanceof Int32Array||o instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${o.constructor.name}`)});const s=new Uint8Array(t);let i=0;return n.forEach(o=>{s.set(new Uint8Array(o.buffer),i),i+=o.byteLength}),s.buffer}const by=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function Kx(e){return by?Buffer.byteLength(e):new Blob([e]).size}function BD(e){if(by)return Buffer.from(e).toString("base64");const t=new Uint8Array(e);let n="";for(let s=0,i=t.length;s{t+=i.byteLength});const n=new Uint8Array(t);let s=0;return e.forEach(i=>{n.set(new Uint8Array(i),s),s+=i.byteLength}),n.buffer}function Xx(e){const t="/";for(e=e.trim();e.endsWith(t);)e=e.slice(0,e.length-1);const n=e.split(t);return n[n.length-1]}function Bl(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:e.modelTopology==null?0:Kx(JSON.stringify(e.modelTopology)),weightSpecsBytes:e.weightSpecs==null?0:Kx(JSON.stringify(e.weightSpecs)),weightDataBytes:e.weightData==null?0:e.weightData.byteLength}}function PD(){const e=n=>{let s=n<<13,i=0;for(;(s&8388608)===0;)i-=8388608,s<<=1;return s&=~8388608,i+=947912704,s|i},t=new Uint32Array(2048);t[0]=0;for(let n=1;n<1024;n++)t[n]=e(n);for(let n=1024;n<2048;n++)t[n]=939524096+(n-1024<<13);return t}function zD(){const e=new Uint32Array(64);e[0]=0,e[31]=1199570944,e[32]=2147483648,e[63]=3347054592;for(let t=1;t<31;t++)e[t]=t<<23;for(let t=33;t<63;t++)e[t]=2147483648+(t-32<<23);return e}function GD(){const e=new Uint32Array(64);for(let 
t=0;t<64;t++)e[t]=1024;return e[0]=e[32]=0,e}function VD(){const e=PD(),t=zD(),n=GD();return s=>{const i=new ArrayBuffer(4*s.length),o=new Uint32Array(i);for(let a=0;a>10]+(c&1023)]+t[c>>10];o[a]=h}return new Float32Array(i)}}class Xt{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return Xt.instance==null&&(Xt.instance=new Xt),Xt.instance}static registerSaveRouter(e){Xt.getInstance().saveRouters.push(e)}static registerLoadRouter(e){Xt.getInstance().loadRouters.push(e)}static getSaveHandlers(e){return Xt.getHandlers(e,"save")}static getLoadHandlers(e,t){return Xt.getHandlers(e,"load",t)}static getHandlers(e,t,n){const s=[],i=t==="load"?Xt.getInstance().loadRouters:Xt.getInstance().saveRouters;return i.forEach(o=>{const a=o(e,n);a!==null&&s.push(a)}),s}}const YD=e=>Xt.registerSaveRouter(e),HD=e=>Xt.registerLoadRouter(e),wy=e=>Xt.getSaveHandlers(e),Ly=(e,t)=>Xt.getLoadHandlers(e,t);const pd="tensorflowjs",Sy=1,ho="models_store",Nr="model_info_store";async function kZ(){const e=Iy();return new Promise((t,n)=>{const s=e.deleteDatabase(pd);s.onsuccess=()=>t(),s.onerror=i=>n(i)})}function Iy(){if(!C().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");const e=typeof window=="undefined"?self:window,t=e.indexedDB||e.mozIndexedDB||e.webkitIndexedDB||e.msIndexedDB||e.shimIndexedDB;if(t==null)throw new Error("The current browser does not appear to support IndexedDB.");return t}function xy(e){const t=e.result;t.createObjectStore(ho,{keyPath:"modelPath"}),t.createObjectStore(Nr,{keyPath:"modelPath"})}class uo{constructor(e){if(this.indexedDB=Iy(),e==null||!e)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=e}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,e)}async load(){return this.databaseAction(this.modelPath)}databaseAction(e,t){return new Promise((n,s)=>{const i=this.indexedDB.open(pd,Sy);i.onupgradeneeded=()=>xy(i),i.onsuccess=()=>{const o=i.result;if(t==null){const a=o.transaction(ho,"readonly"),c=a.objectStore(ho),h=c.get(this.modelPath);h.onsuccess=()=>{if(h.result==null)return o.close(),s(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));n(h.result.modelArtifacts)},h.onerror=d=>(o.close(),s(h.error)),a.oncomplete=()=>o.close()}else{const a=Bl(t),c=o.transaction(Nr,"readwrite");let h=c.objectStore(Nr);const d=h.put({modelPath:this.modelPath,modelArtifactsInfo:a});let m;d.onsuccess=()=>{m=o.transaction(ho,"readwrite");const y=m.objectStore(ho),b=y.put({modelPath:this.modelPath,modelArtifacts:t,modelArtifactsInfo:a});b.onsuccess=()=>n({modelArtifactsInfo:a}),b.onerror=w=>{h=c.objectStore(Nr);const L=h.delete(this.modelPath);L.onsuccess=()=>(o.close(),s(b.error)),L.onerror=T=>(o.close(),s(b.error))}},d.onerror=y=>(o.close(),s(d.error)),c.oncomplete=()=>{m==null?o.close():m.oncomplete=()=>o.close()}}},i.onerror=o=>s(i.error)})}}uo.URL_SCHEME="indexeddb://";const Jx=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(uo.URL_SCHEME))?qD(e.slice(uo.URL_SCHEME.length)):null;Xt.registerSaveRouter(Jx),Xt.registerLoadRouter(Jx);function qD(e){return new uo(e)}function jD(e){return e.startsWith(uo.URL_SCHEME)?e.slice(uo.URL_SCHEME.length):e}class KD{constructor(){this.indexedDB=Iy()}async listModels(){return new Promise((e,t)=>{const 
n=this.indexedDB.open(pd,Sy);n.onupgradeneeded=()=>xy(n),n.onsuccess=()=>{const s=n.result,i=s.transaction(Nr,"readonly"),o=i.objectStore(Nr),a=o.getAll();a.onsuccess=()=>{const c={};for(const h of a.result)c[h.modelPath]=h.modelArtifactsInfo;e(c)},a.onerror=c=>(s.close(),t(a.error)),i.oncomplete=()=>s.close()},n.onerror=s=>t(n.error)})}async removeModel(e){return e=jD(e),new Promise((t,n)=>{const s=this.indexedDB.open(pd,Sy);s.onupgradeneeded=()=>xy(s),s.onsuccess=()=>{const i=s.result,o=i.transaction(Nr,"readwrite"),a=o.objectStore(Nr),c=a.get(e);let h;c.onsuccess=()=>{if(c.result==null)return i.close(),n(new Error(`Cannot find model with path '${e}' in IndexedDB.`));{const d=a.delete(e),m=()=>{h=i.transaction(ho,"readwrite");const y=h.objectStore(ho),b=y.delete(e);b.onsuccess=()=>t(c.result.modelArtifactsInfo),b.onerror=w=>n(c.error)};d.onsuccess=m,d.onerror=y=>(m(),i.close(),n(c.error))}},c.onerror=d=>(i.close(),n(c.error)),o.oncomplete=()=>{h==null?i.close():h.oncomplete=()=>i.close()}},s.onerror=i=>n(s.error)})}}const Ti="/",po="tensorflowjs_models",Zx="info",XD="model_topology",JD="weight_specs",ZD="weight_data",QD="model_metadata";function FZ(){if(!C().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("purgeLocalStorageModels() cannot proceed because local storage is unavailable in the current environment.");const e=window.localStorage,t=[];for(let n=0;ni.length){e.removeItem(s);const o=eT(s);t.indexOf(o)===-1&&t.push(o)}}return t}function Qx(e){return{info:[po,e,Zx].join(Ti),topology:[po,e,XD].join(Ti),weightSpecs:[po,e,JD].join(Ti),weightData:[po,e,ZD].join(Ti),modelMetadata:[po,e,QD].join(Ti)}}function eT(e){const t=e.split(Ti);if(t.length<3)throw new Error(`Invalid key format: ${e}`);return t.slice(1,t.length-1).join(Ti)}function ek(e){return e.startsWith(mo.URL_SCHEME)?e.slice(mo.URL_SCHEME.length):e}class mo{constructor(e){if(!C().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,e==null||!e)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=e,this.keys=Qx(this.modelPath)}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{const t=JSON.stringify(e.modelTopology),n=JSON.stringify(e.weightSpecs),s=Bl(e);try{return this.LS.setItem(this.keys.info,JSON.stringify(s)),this.LS.setItem(this.keys.topology,t),this.LS.setItem(this.keys.weightSpecs,n),this.LS.setItem(this.keys.weightData,BD(e.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata})),{modelArtifactsInfo:s}}catch(i){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${s.modelTopologyBytes}, weightSpecsBytes=${s.weightSpecsBytes}, weightDataBytes=${s.weightDataBytes}.`)}}}async load(){const e=JSON.parse(this.LS.getItem(this.keys.info));if(e==null)throw new Error(`In local storage, there is no model with name 
'${this.modelPath}'`);if(e.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");const t={},n=JSON.parse(this.LS.getItem(this.keys.topology));if(n==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);t.modelTopology=n;const s=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(s==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);t.weightSpecs=s;const i=this.LS.getItem(this.keys.modelMetadata);if(i!=null){const a=JSON.parse(i);t.format=a.format,t.generatedBy=a.generatedBy,t.convertedBy=a.convertedBy,t.userDefinedMetadata=a.userDefinedMetadata}const o=this.LS.getItem(this.keys.weightData);if(o==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return t.weightData=MD(o),t}}mo.URL_SCHEME="localstorage://";const tT=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(mo.URL_SCHEME))?tk(e.slice(mo.URL_SCHEME.length)):null;Xt.registerSaveRouter(tT),Xt.registerLoadRouter(tT);function tk(e){return new mo(e)}class nk{constructor(){k(C().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),k(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){const e={},t=po+Ti,n=Ti+Zx;for(let s=0;s"scheme must not be undefined or null."),e.endsWith(Ia)&&(e=e.slice(0,e.indexOf(Ia))),k(e.length>0,()=>"scheme must not be an empty string.");const n=bs.getInstance();k(n.managers[e]==null,()=>`A model store manager is already registered for scheme '${e}'.`),n.managers[e]=t}static getManager(e){const t=this.getInstance().managers[e];if(t==null)throw new Error(`Cannot find model manager for scheme '${e}'`);return t}static getSchemes(){return Object.keys(this.getInstance().managers)}}function md(e){if(e.indexOf(Ia)===-1)throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${bs.getSchemes().join(",")}`);return{scheme:e.split(Ia)[0],path:e.split(Ia)[1]}}async function nT(e,t,n=!1){k(e!==t,()=>`Old path and new path are the same: '${e}'`);const s=Xt.getLoadHandlers(e);k(s.length>0,()=>`Copying failed because no load handler is found for source URL ${e}.`),k(s.length<2,()=>`Copying failed because more than one (${s.length}) load handlers for source URL ${e}.`);const i=s[0],o=Xt.getSaveHandlers(t);k(o.length>0,()=>`Copying failed because no save handler is found for destination URL ${t}.`),k(o.length<2,()=>`Copying failed because more than one (${s.length}) save handlers for destination URL ${t}.`);const a=o[0],c=md(e).scheme,h=md(e).path,d=c===md(e).scheme,m=await i.load();n&&d&&await bs.getManager(c).removeModel(h);const y=await a.save(m);return n&&!d&&await bs.getManager(c).removeModel(h),y.modelArtifactsInfo}async function sk(){const e=bs.getSchemes(),t={};for(const n of e){const s=await bs.getManager(n).listModels();for(const i in s){const o=n+Ia+i;t[o]=s[i]}}return t}async function ik(e){const t=md(e),n=bs.getManager(t.scheme);return n.removeModel(t.path)}async function rk(e,t){const n=!1;return nT(e,t,n)}async function ok(e,t){const n=!0;return nT(e,t,n)}class ak{fetch(e,t){return fetch(e,t)}now(){return performance.now()}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${t}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(e)}decode(e,t){return new TextDecoder(t).decode(e)}}if(C().get("IS_BROWSER")){C().setPlatform("browser",new ak);try{bs.registerManager(mo.URL_SCHEME,new nk)}catch(e){}try{bs.registerManager(uo.URL_SCHEME,new KD)}catch(e){}}const ck={importFetch:()=>IC()};let xa;function _Z(){xa=null}function WZ(e){xa=e}function $Z(){return xa}class lk{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(e,t){return C().global.fetch!=null?C().global.fetch(e,t):(xa==null&&(xa=ck.importFetch()),xa(e,t))}now(){const e=process.hrtime();return e[0]*1e3+e[1]/1e6}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${t}`);return this.textEncoder.encode(e)}decode(e,t){return e.length===0?"":new this.util.TextDecoder(t).decode(e)}}C().get("IS_NODE")&&C().setPlatform("node",new lk);function Ze(e,t="float32",n){return t=t||"float32",cy(e),new Ar(e,t,n)}function hk(e,t){const n=W(e,"x","cast");if(!Dx(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if(t==="string"&&n.dtype!=="string"||t!=="string"&&n.dtype==="string")throw new Error("Only strings can be casted to strings");const s={x:n},i={dtype:t};return V.runKernelFunc(o=>o.cast(n,t),s,null,Jc,i)}const ve=P({cast_:hk});function uk(e){const t=W(e,"x","clone",null),n=()=>V.makeTensorFromDataId(t.dataId,t.shape,t.dtype),s={x:t};return V.runKernelFunc(n,s,null,al)}const Cr=P({clone_:uk});function sT(e,t=!1){console.log(e.toString(t))}Vx();const dk={buffer:Ze,cast:ve,clone:Cr,print:sT};CD(dk);const pk="model",mk=".json",fk=".weights.bin";function iT(e){return new Promise(t=>setTimeout(t)).then(e)}class Ta{constructor(e){if(!C().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");e.startsWith(Ta.URL_SCHEME)&&(e=e.slice(Ta.URL_SCHEME.length)),(e==null||e.length===0)&&(e=pk),this.modelTopologyFileName=e+mk,this.weightDataFileName=e+fk}async save(e){if(typeof document=="undefined")throw new Error("Browser downloads are not supported in 
this environment since `document` is not present");const t=window.URL.createObjectURL(new Blob([e.weightData],{type:"application/octet-stream"}));if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{const n=[{paths:["./"+this.weightDataFileName],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n},i=window.URL.createObjectURL(new Blob([JSON.stringify(s)],{type:"application/json"})),o=this.jsonAnchor==null?document.createElement("a"):this.jsonAnchor;if(o.download=this.modelTopologyFileName,o.href=i,await iT(()=>o.dispatchEvent(new MouseEvent("click"))),e.weightData!=null){const a=this.weightDataAnchor==null?document.createElement("a"):this.weightDataAnchor;a.download=this.weightDataFileName,a.href=t,await iT(()=>a.dispatchEvent(new MouseEvent("click")))}return{modelArtifactsInfo:Bl(e)}}}}Ta.URL_SCHEME="downloads://";class gk{constructor(e){if(e==null||e.length<1)throw new Error(`When calling browserFiles, at least 1 file is required, but received ${e}`);this.files=e}async load(){const e=this.files[0],t=this.files.slice(1);return new Promise((n,s)=>{const i=new FileReader;i.onload=o=>{const a=JSON.parse(o.target.result),c=a.modelTopology;if(c==null){s(new Error(`modelTopology field is missing from file ${e.name}`));return}t.length===0&&n({modelTopology:c});const h=a.weightsManifest;if(h==null){s(new Error(`weightManifest field is missing from file ${e.name}`));return}let d;try{d=this.checkManifestAndWeightFiles(h,t)}catch(w){s(w);return}const m=[],y=[],b=[];h.forEach(w=>{w.paths.forEach(L=>{y.push(L),b.push(null)}),m.push(...w.weights)}),h.forEach(w=>{w.paths.forEach(L=>{const T=new FileReader;T.onload=A=>{const N=A.target.result,E=y.indexOf(L);b[E]=N,b.indexOf(null)===-1&&n({modelTopology:c,weightSpecs:m,weightData:dd(b),format:a.format,generatedBy:a.generatedBy,convertedBy:a.convertedBy,userDefinedMetadata:a.userDefinedMetadata})},T.onerror=A=>s(`Failed to weights data from file of path '${L}'.`),T.readAsArrayBuffer(d[L])})})},i.onerror=o=>s(`Failed to read model topology and weights manifest JSON from file '${e.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`),i.readAsText(e)})}checkManifestAndWeightFiles(e,t){const n=[],s=t.map(o=>Xx(o.name)),i={};for(const o of e)o.paths.forEach(a=>{const c=Xx(a);if(n.indexOf(c)!==-1)throw new Error(`Duplicate file basename found in weights manifest: '${c}'`);if(n.push(c),s.indexOf(c)===-1)throw new Error(`Weight file with basename '${c}' is not provided.`);i[a]=t[s.indexOf(c)]});if(n.length!==t.length)throw new Error(`Mismatch in the number of files in weights manifest (${n.length}) and the number of weight files provided (${t.length}).`);return i}}const yk=e=>C().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(Ta.URL_SCHEME))?bk(e.slice(Ta.URL_SCHEME.length)):null;Xt.registerSaveRouter(yk);function bk(e="model"){return new Ta(e)}function wk(e){return new gk(e)}function rT(e,t,n,s){a(e),n=n==null?0:n,s=s==null?1:s,c(n,s);let i=0;const o=h=>(h.then(d=>{const m=n+ ++i/e.length*(s-n);return t(m),d}),h);function a(h){k(h!=null&&Array.isArray(h)&&h.length>0,()=>"promises must be a none empty array")}function c(h,d){k(h>=0&&h<=1,()=>`Progress fraction must be in range [0, 1], but got startFraction ${h}`),k(d>=0&&d<=1,()=>`Progress fraction must be in range [0, 1], but got endFraction ${d}`),k(d>=h,()=>`startFraction must be no more than endFraction, but got startFraction ${h} and endFraction ${d}`)}return Promise.all(e.map(o))}async function oT(e,t){t==null&&(t={});const n=t.fetchFunc==null?C().platform.fetch:t.fetchFunc,s=e.map(y=>n(y,t.requestInit,{isBinary:!0})),i=0,o=.5,a=t.onProgress==null?await Promise.all(s):await rT(s,t.onProgress,i,o),c=a.map(y=>y.arrayBuffer()),h=.5,d=1,m=t.onProgress==null?await Promise.all(c):await rT(c,t.onProgress,h,d);return m}async function aT(e,t="",n,s){const i=a=>oT(a,{requestInit:s}),o=cT(i);return o(e,t,n)}function cT(e){return async(t,n="",s)=>{const i=t.map(()=>!1),o={},a=s!=null?s.map(()=>!1):[],c=[];if(t.forEach((w,L)=>{let T=0;w.weights.forEach(A=>{const N="quantization"in A?A.quantization.dtype:A.dtype,E=gy[N]*we(A.shape),D=()=>{i[L]=!0,o[L]==null&&(o[L]=[]),o[L].push({manifestEntry:A,groupOffset:T,sizeBytes:E})};s!=null?s.forEach((F,_)=>{F===A.name&&(D(),a[_]=!0)}):D(),c.push(A.name),T+=E})}),!a.every(w=>w)){const w=s.filter((L,T)=>!a[T]);throw new Error(`Could not find weights in manifest with names: ${w.join(", ")}. 
-Manifest JSON has weights with names: ${c.join(", ")}.`)}const h=i.reduce((w,L,T)=>(L&&w.push(T),w),[]),d=[];h.forEach(w=>{t[w].paths.forEach(L=>{const T=n+(n.endsWith("/")?"":"/")+L;d.push(T)})});const m=await e(d),y={};let b=0;return h.forEach(w=>{const L=t[w].paths.length;let T=0;for(let F=0;F{const _=A.slice(F.groupOffset,F.groupOffset+F.sizeBytes),B=ud(_,[F.manifestEntry]);for(const $ in B)y[$]=B[$]}),b+=L}),y}}const Lk="application/octet-stream",Sk="application/json";class Ty{constructor(e,t){if(this.DEFAULT_METHOD="POST",t==null&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,t.fetchFunc!=null?(k(typeof t.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=t.fetchFunc):this.fetch=C().platform.fetch,k(e!=null&&e.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(e)&&k(e.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`),this.path=e,t.requestInit!=null&&t.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata,weightsManifest:n};t.body.append("model.json",new Blob([JSON.stringify(s)],{type:Sk}),"model.json"),e.weightData!=null&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:Lk}),"model.weights.bin");const i=await this.fetch(this.path,t);if(i.ok)return{modelArtifactsInfo:Bl(e),responses:[i]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${i.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(b){let w=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?w+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":w+=" Please make sure the server is serving valid JSON for this request.",new Error(w)}const n=t.modelTopology,s=t.weightsManifest,i=t.generatedBy,o=t.convertedBy,a=t.format,c=t.userDefinedMetadata;if(n==null&&s==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let h,d;if(s!=null){const b=await this.loadWeights(s);[h,d]=b}const m={modelTopology:n,weightSpecs:h,weightData:d,userDefinedMetadata:c,generatedBy:i,convertedBy:o,format:a},y=t.modelInitializer;return y&&(m.modelInitializer=y),m}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,s]=Ik(t),i=this.weightPathPrefix||n,o=[];for(const d of e)o.push(...d.weights);const a=[],c=[];for(const d of e)for(const m of d.paths)this.weightUrlConverter!=null?c.push(this.weightUrlConverter(m)):a.push(i+m+s);this.weightUrlConverter&&a.push(...await Promise.all(c));const h=await oT(a,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[o,dd(h)]}}Ty.URL_SCHEME_REGEX=/^https?:\/\//;function Ik(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?"),s=e.substring(0,t),i=n>t?e.substring(n):"";return[s+"/",i]}function Ay(e){return e.match(Ty.URL_SCHEME_REGEX)!=null}const lT=(e,t)=>{if(typeof fetch=="undefined"&&(t==null||t.fetchFunc==null))return null;{let n=!0;if(Array.isArray(e)?n=e.every(s=>Ay(s)):n=Ay(e),n)return vy(e,t)}return null};Xt.registerSaveRouter(lT),Xt.registerLoadRouter(lT);function vy(e,t){return new Ty(e,t)}function fd(e,t){return vy(e,t)}class Ny{constructor(e){this.modelArtifacts=e}async load(){return this.modelArtifacts}}class xk{constructor(e){this.saveHandler=e}async save(e){return this.saveHandler(e)}}function Tk(e,t,n,s){if(arguments.length===1){const i=e.modelTopology!=null||e.weightSpecs!=null;return i?new Ny(e):(console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Ny({modelTopology:e}))}else return console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Ny({modelTopology:e,weightSpecs:t,weightData:n,trainingConfig:s})}function Ak(e){return new xk(e)}var vk=Object.freeze({__proto__:null,browserFiles:wk,browserHTTPRequest:fd,concatenateArrayBuffers:dd,decodeWeights:ud,encodeWeights:yy,fromMemory:Tk,getLoadHandlers:Ly,getModelArtifactsInfoForJSON:Bl,getSaveHandlers:wy,http:vy,isHTTPScheme:Ay,loadWeights:aT,registerLoadRouter:HD,registerSaveRouter:YD,weightsLoaderFactory:cT,withSaveHandler:Ak,copyModel:rk,listModels:sk,moveModel:ok,removeModel:ik});function Nk(e,t){const n=W(e,"x","reshape",null),s={x:n},i={shape:t},o=(a,c)=>(t=id(t,n.size),k(n.size===we(t),()=>"new shape and old shape must have the same number of elements."),c([n]),a.reshape(n,t));return V.runKernelFunc(o,s,null,yl,i)}const K=P({reshape_:Nk});function Ck(e,t,n=!1,s=!1){let i=W(e,"a","matMul"),o=W(t,"b","matMul");[i,o]=Bt(i,o),k(i.rank>=2&&o.rank>=2&&i.rank===o.rank,()=>`Error in matMul: inputs must have the same rank of at least 2, got ranks ${i.rank} and ${o.rank}.`);const a=n?i.shape[i.rank-2]:i.shape[i.rank-1],c=s?o.shape[o.rank-1]:o.shape[o.rank-2],h=n?i.shape[i.rank-1]:i.shape[i.rank-2],d=s?o.shape[o.rank-2]:o.shape[o.rank-1],m=i.shape.slice(0,-2),y=o.shape.slice(0,-2),b=we(m),w=we(y);k(ot(m,y),()=>`Error in matMul: outer dimensions (${m}) and (${y}) of Tensors with shapes ${i.shape} and ${o.shape} must match.`),k(a===c,()=>`Error in matMul: inner shapes (${a}) and (${c}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${n} and transposeB=${s} must match.`);const L=i.shape.slice(0,-2).concat([h,d]),T=n?K(i,[b,a,h]):K(i,[b,h,a]),A=s?K(o,[w,d,c]):K(o,[w,c,d]),N=(_,B)=>(B([T,A]),_.batchMatMul(T,A,n,s)),E={a:T,b:A},D={transposeA:n,transposeB:s},F=V.runKernelFunc(N,E,null,ag,D);return K(F,L)}const at=P({matMul_:Ck});function Rk(e,t,n=1,s=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);const i=W(e,"indices","oneHot","int32"),o=[...i.shape,t],a=(d,m)=>(m([i]),K(d.oneHot(K(i,[i.size]),t,n,s),o)),c={indices:i},h={depth:t,onValue:n,offValue:s};return V.runKernelFunc(a,c,null,Dg,h)}const fo=P({oneHot_:Rk});function Ok(e,t){const n=W(e,"x","transpose");if(t==null&&(t=n.shape.map((o,a)=>a).reverse()),k(n.rank===t.length,()=>`Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`),t.forEach(o=>{k(o>=0&&o`All entries in 'perm' must be between 0 and ${n.rank-1} but got ${t}`)}),n.rank<=1)return n.clone();const s={x:n},i={perm:t};return V.runKernelFunc(o=>o.transpose(n,t),s,null,Cl,i)}const Me=P({transpose_:Ok});function Ek(e,t,n){const s=W(e,"labels","confusionMatrix"),i=W(t,"predictions","confusionMatrix");k(n==null||n>0&&Number.isInteger(n),()=>`If provided, numClasses must be a positive integer, but got ${n}`),k(s.rank===1,()=>`Expected the rank of labels to be 1, but got ${s.rank}`),k(i.rank===1,()=>`Expected the rank of predictions to be 1, but got ${i.rank}`),k(s.shape[0]===i.shape[0],()=>`Mismatch in the number of examples: ${s.shape[0]} vs. ${i.shape[0]}. 
Labels and predictions should have the same number of elements.`),k(n>0&&Number.isInteger(n),()=>`numClasses is required to be a positive integer, but got ${n}`);const o=fo(ve(s,"int32"),n),a=fo(ve(i,"int32"),n),c=Me(o);return ve(at(c,a),"int32")}const Dk=P({confusionMatrix_:Ek});var kk=Object.freeze({__proto__:null,confusionMatrix:Dk});function hT(e,t,n){if(ao(e),t!=null&&t.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const s=Ii(e,n);if(s.length!==3&&s.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return vr(e,t,s,n)}let Aa;function Fk(e,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(e==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let n=!1,s=!1,i=!1,o=!1,a=!1;if(e.data instanceof Uint8Array)n=!0;else if(typeof ImageData!="undefined"&&e instanceof ImageData)s=!0;else if(typeof HTMLVideoElement!="undefined"&&e instanceof HTMLVideoElement)i=!0;else if(typeof HTMLImageElement!="undefined"&&e instanceof HTMLImageElement)o=!0;else if(e.getContext!=null)a=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${e.constructor.name}`);if(i){const w=2;if(i&&e.readyState element.")}const c=Zg(Qu,V.backendName);if(c!=null){const w={pixels:e},L={numChannels:t};return V.runKernel(Qu,w,L)}const[h,d]=i?[e.videoWidth,e.videoHeight]:[e.width,e.height];let m;a?m=e.getContext("2d").getImageData(0,0,h,d).data:s||n?m=e.data:(o||i)&&(Aa==null&&(Aa=document.createElement("canvas").getContext("2d")),Aa.canvas.width=h,Aa.canvas.height=d,Aa.drawImage(e,0,0,h,d),m=Aa.getImageData(0,0,h,d).data);let y;if(t===4)y=new Int32Array(m);else{const w=h*d;y=new Int32Array(w*t);for(let L=0;L4||o===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${o}`);if(n.dtype!=="float32"&&n.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${n.dtype}. Please use float32 or int32 tensors.`);const a=await n.data(),c=n.dtype==="float32"?255:1,h=new Uint8ClampedArray(i*s*4);for(let d=0;d1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${w}.`)}else if(n.dtype==="int32"&&(w<0||w>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${w}.`);o===1?(m[0]=w*c,m[1]=w*c,m[2]=w*c):m[b]=w*c}const y=d*4;h[y+0]=Math.round(m[0]),h[y+1]=Math.round(m[1]),h[y+2]=Math.round(m[2]),h[y+3]=Math.round(m[3])}if(t!=null){t.width=i,t.height=s;const d=t.getContext("2d"),m=new ImageData(h,i,s);d.putImageData(m,0,0)}return n!==e&&n.dispose(),h}const uT=P({fromPixels_:Fk});var Wk=Object.freeze({__proto__:null,toPixels:_k,fromPixels:uT});function gd(e,t){if(e.rank<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${e.rank}.`);if(t.rank<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(t.dtype!=="int32")throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[t.rank-1]>e.rank)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[t.rank-1]} vs. 
${e.rank}`);if(e.size===0)throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${e.shape}.`);const n=t.shape,s=n[n.length-1];let i=1;for(let d=0;dd/c),1].slice(0,s);return[a,i,c,h]}var $k=Object.freeze({__proto__:null,prepareAndValidate:gd});function Cy(e,t,n){const s=t.rank>1?t.shape[t.rank-1]:1,i=t.rank>1?t.rank-1:1,o=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${n.shape}, indices.shape: ${t.shape}, shape: ${e}, sliceDim: ${s}, and batchDim: ${i}.`;if(n.rank1?t.shape[s-1]:1,o=n.length;let a=1;for(let y=i;y`Error in slice${s}D: Length of begin ${t} must match the rank of the array (${s}).`),k(s===n.length,()=>`Error in slice${s}D: Length of size ${n} must match the rank of the array (${s}).`);for(let i=0;i`Error in slice${s}D: begin[${i}] + size[${i}] (${t[i]+n[i]}) would overflow input.shape[${i}] (${e.shape[i]})`)}function yd(e){const t=[];let n=0;for(;e>0;)e&1&&t.push(n),e/=2,n++;return t}function bd(e,t,n){const s=[];for(let i=0;i0){const w=t[0],L=n+1;m=gT(a,w,L,s,e),y=yT(c,w,L,i,e),b=dT(o,w,L,e)}else for(let w=0;w-1)o[c]=0;else{const h=pT(t,n,c);let d=s[h];e&1<-1)o[c]=Number.MAX_SAFE_INTEGER;else{const h=pT(t,n,c);let d=s[h];e&1<0?a=Number.MIN_SAFE_INTEGER:a=Number.MAX_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),a=El(0,a,h-1),a}function LT(e,t,n,s,i,o){let a=t[i];const c=n[i]||1;(e&1<0?a=Number.MAX_SAFE_INTEGER:a=Number.MIN_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),c>0?a=El(0,a,h):a=El(-1,a,h-1),a}function Ey(e,t,n){let s=n.length;for(let i=0;i1){s=i;break}for(let i=s+1;i0||n[i]!==e[i])return!1;return!0}function Dy(e,t){let n=e.length>0?e[e.length-1]:1;for(let s=0;s{k(a!==-1,()=>"slice() does not support negative begin indexing.")});let o;return n==null?o=new Array(i).fill(-1):typeof n=="number"?o=[n,...new Array(i-1).fill(-1)]:n.lengtha>=0?a:(k(a===-1,()=>`Negative size values should be exactly -1 but got ${a} for the slice() size at index ${c}.`),e.shape[c]-s[c])),[s,o]}var ST=Object.freeze({__proto__:null,assertParamsValid:Oy,maskToAxes:yd,computeOutShape:bd,stridesWithElidedDims:dT,getNormalizedAxes:fT,startIndicesWithElidedDims:gT,stopIndicesWithElidedDims:yT,stridesForAxis:bT,startForAxis:wT,stopForAxis:LT,isSliceContinous:Ey,computeFlatOffset:Dy,parseSliceParams:wd});class go{getClassName(){return this.constructor.className}static fromConfig(e,t){return new e(t)}}class ks{constructor(){this.classNameMap={}}static getMap(){return ks.instance==null&&(ks.instance=new ks),ks.instance}static register(e){ks.getMap().classNameMap[e.className]=[e,e.fromConfig]}}function me(e){k(e.className!=null,()=>"Class being registered does not have the static className property defined."),k(typeof e.className=="string",()=>"className is required to be a string, but got type "+typeof e.className),k(e.className.length>0,()=>"Class being registered has an empty-string as its className, which is disallowed."),ks.register(e)}var Bk=Object.freeze({__proto__:null,Serializable:go,SerializationMap:ks,registerClass:me});const Mk=.001,IT=.1;function Pk(e,t,n){return n==null&&(n=Ld()),ky(e,t,(s,i)=>_y(s,i,n))}function Ld(){return V.backend.floatPrecision()===32?Mk:IT}function ky(e,t,n){let s=!0;if((wn(e)||wn(t))&&(s=!1),wn(e)&&wn(t)&&(s=!0),s){const a=e.constructor.name,c=t.constructor.name;if(a!==c)throw new Error(`Arrays are of different type. Actual: ${a}. Expected: ${c}`)}if(Array.isArray(e)&&Array.isArray(t)){const a=Ii(e),c=Ii(t);if(!ot(a,c))throw new Error(`Arrays have different shapes. Actual: [${a}]. 
Expected: [${c}]`)}const i=wn(e)?e:Yi(e),o=wn(t)?t:Yi(t);if(i.length!==o.length)throw new Error(`Arrays have different lengths actual: ${i.length} vs expected: ${o.length}. +`)}function rk(e,t,n,s){const i=P(t),o=s[s.length-1],a=new Array(o).fill(0),c=t.length,h=n==="complex64"?Zl(e):e;if(c>1)for(let d=0;dsT){const v=Xl*a;let N=Array.from(e.slice(0,v)),O=Array.from(e.slice((c-Xl)*a,c*a));return n==="complex64"&&(N=Zl(N),O=Zl(O)),["["+N.map((E,k)=>Jl(E,i[k],n)).join(", ")+", ..., "+O.map((E,k)=>Jl(E,i[c-Xl+k],n)).join(", ")+"]"]}const x=n==="complex64"?Zl(e):Array.from(e);return["["+x.map((v,N)=>Jl(v,i[N],n)).join(", ")+"]"]}const d=t.slice(1),m=s.slice(1),f=s[0]*a,b=[];if(c>sT){for(let x=0;x`Length of values '${s}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||ws(t,this.size),this.strides=je(e)}set(e,...t){t.length===0&&(t=[0]),A(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const s of e){if(s<0||s>=this.shape[t]){const i=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(i)}t++}let n=e[e.length-1];for(let s=0;sKl(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=Si().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>Kl(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await Si().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;Si().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return Oa.print(this,e)}clone(){return this.throwIfDisposed(),Oa.clone(this)}toString(e=!1){const t=this.dataSync();return ik(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),Oa.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),Si().makeVariable(this,e,t,n)}}Object.defineProperty(ee,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class Ql extends ee{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!ae(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);Si().disposeTensor(this),this.dataId=e.dataId,Si().incRef(this,null)}dispose(){Si().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Ql,Symbol.hasInstance,{value:e=>e instanceof ee&&e.assign!=null&&e.assign instanceof Function});(function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"})(r.Rank||(r.Rank={}));var Fy;(function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"})(Fy||(Fy={}));var _y;(function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"})(_y||(_y={}));var 
Wy;(function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"})(Wy||(Wy={}));var $y;(function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"})($y||($y={}));const lk={float32:Wy,int32:Fy,bool:_y,complex64:$y};function $n(e,t){if(e==="string"||t==="string"){if(e==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return lk[e][t]}function Ud(e){return $n(e,"int32")}function Gt(e,t){if(e.dtype===t.dtype)return[e,t];const n=$n(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function oT(e,t){A(e.dtype===t.dtype,()=>`The dtypes of the first(${e.dtype}) and second(${t.dtype}) input must match`)}function Bd(e,t){return t.some(n=>n.id===e.id)}function Hi(e){const t=[],n=new Set;return aT(e,t,n),t}function aT(e,t,n){if(e==null)return;if(e instanceof ee){t.push(e);return}if(!hk(e))return;const s=e;for(const i in s){const o=s[i];n.has(o)||(n.add(o),aT(o,t,n))}}function hk(e){return Array.isArray(e)||typeof e=="object"}var uk=Object.freeze({__proto__:null,makeTypesMatch:Gt,assertTypesMatch:oT,isTensorInList:Bd,getTensorsInContainer:Hi});class cT{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class eh{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new cT}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const e=this.getSortedBackends();for(let t=0;t{t.setupFunc!=null&&t.setupFunc(this.backendInstance)})}disposeRegisteredKernels(e){const t=Fd(e);t.forEach(n=>{n.disposeFunc!=null&&n.disposeFunc(this.registry[e])})}initializeBackend(e){const t=this.registryFactory[e];if(t==null)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(n&&!(n instanceof y)&&typeof n.then=="function"){const s=++this.pendingBackendInitId,i=n.then(o=>s(sthis.registryFactory[t].priority-this.registryFactory[e].priority)}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;tthis.startScope(n),()=>this.endScope(s),()=>(s=t(),s instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),s))}scopedRun(e,t,n){e();try{const s=n();return t(),s}catch(s){throw t(),s}}nextTensorId(){return eh.nextTensorId++}nextVariableId(){return eh.nextVariableId++}clone(e){const t=this.makeTensorFromDataId(e.dataId,e.shape,e.dtype),n={x:e},s=o=>({x:()=>{const a="float32",c={x:o},h={dtype:a};return G.runKernelFunc(d=>d.cast(o,a),c,null,Sa,h)}}),i=[];return this.addTapeNode(this.state.activeScope.name,n,[t],s,i,{}),t}runKernel(e,t,n,s,i){const o=null,a=null;return this.runKernelFunc(o,t,a,e,n,s,i)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let i=0;n.forEach(c=>{i+=c.dtype==="complex64"?3:1});const o=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],a=s-t-i-o;if(a>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${a} data ids) after running '${e}'`)}runKernelFunc(e,t,n,s,i,o,a){let c,h=[];const 
d=this.isTapeOn();s==null&&(s=this.state.activeScope!=null?this.state.activeScope.name:"");const m=this.state.numBytes,f=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let b;const w=Oy(s,this.backendName);let L;if(w!=null)b=()=>{const v=this.backend.numDataIds();L=w.kernelFunc({inputs:t,attrs:i,backend:this.backend});const N=Array.isArray(L)?L:[L];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,v,N);const O=N.map(({dataId:E,shape:k,dtype:F})=>this.makeTensorFromDataId(E,k,F));if(d){let E=this.getTensorsForGradient(s,t,O);if(E==null){a==null&&(a=[]);const k=O.filter((F,U)=>a[U]);E=(o||[]).slice().concat(k)}h=this.saveTensorsForBackwardMode(E)}return O};else{const v=N=>{if(!d)return;h=N.map(O=>this.keep(this.clone(O)))};b=()=>{const N=this.backend.numDataIds();L=this.tidy(()=>e(this.backend,v));const O=Array.isArray(L)?L:[L];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,N,O),O}}let x;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?c=b():(x=this.profiler.profileKernel(s,t,()=>b()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(x),c=x.outputs)}),d&&this.addTapeNode(s,t,c,n,h,i),this.state.profiling&&this.state.activeProfile.kernels.push({name:s,bytesAdded:this.state.numBytes-m,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-f,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(t).map(v=>t[v]!=null?t[v].shape:null),outputShapes:c.map(v=>v.shape),kernelTimeMs:x.timeMs,extraInfo:x.extraInfo}),Array.isArray(L)?c:c[0]}saveTensorsForBackwardMode(e){const t=e.map(n=>this.keep(this.clone(n)));return t}getTensorsForGradient(e,t,n){const s=Ey(e);if(s!=null){const i=s.inputsToSave||[],o=s.outputsToSave||[];let a;s.saveAllInputs?(A(Array.isArray(t),()=>"saveAllInputs is true, expected inputs to be an array."),a=Object.keys(t).map(h=>t[h])):a=i.map(h=>t[h]);const c=n.filter((h,d)=>o[d]);return a.concat(c)}return null}makeTensor(e,t,n,s){if(e==null)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let i=e;n==="string"&&Yi(e[0])&&(i=e.map(c=>Wd(c)));const o=s.write(i,t,n),a=new ee(t,n,o,this.nextTensorId());if(this.incRef(a,s),n==="string"){const c=this.state.tensorInfo.get(o),h=Ix(i);this.state.numBytes+=h-c.bytes,c.bytes=h}return a}makeTensorFromDataId(e,t,n,s){n=n||"float32";const i=new ee(t,n,e,this.nextTensorId());return this.incRef(i,s),i}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),s!=null&&s!==e.dtype&&(e=e.cast(s));const i=new Ql(e,t,n,this.nextTensorId());if(this.state.registeredVariables[i.name]!=null)throw new Error(`Variable with name ${i.name} was already registered`);return this.state.registeredVariables[i.name]=i,this.incRef(i,this.backend),i}incRef(e,t){const n=this.state.tensorInfo.has(e.dataId)?this.state.tensorInfo.get(e.dataId).refCount:0;if(this.state.numTensors++,e.dtype==="string"&&this.state.numStringTensors++,n===0){this.state.numDataBuffers++;let s=0;e.dtype!=="complex64"&&e.dtype!=="string"&&(s=e.size*Bg(e.dtype)),this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:s,refCount:0}),this.state.numBytes+=s}this.state.tensorInfo.get(e.dataId).refCount++,e instanceof Ql||this.track(e)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;this.state.numTensors--,e.dtype==="string"&&this.state.numStringTensors--;const 
t=this.state.tensorInfo.get(e.dataId),n=t.refCount;n<=1?(e.dtype!=="complex64"&&(this.state.numBytes-=t.bytes),this.state.numDataBuffers--,t.backend.disposeData(e.dataId),this.state.tensorInfo.delete(e.dataId)):this.state.tensorInfo.get(e.dataId).refCount--}disposeVariables(){for(const e in this.state.registeredVariables){const t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),this.state.registeredVariables[e.name]!=null&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,e.reasons==null&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(s=>s.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const s of this.state.activeProfile.kernels)s.kernelTimeMs=await s.kernelTimeMs,s.extraInfo=await s.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(e,t,n,s,i,o){const a={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:i},c=Ey(e);c!=null&&(s=c.gradFunc),s!=null&&(a.gradient=h=>(h=h.map((d,m)=>{if(d==null){const f=n[m],b=La(f.size,f.dtype);return this.makeTensor(b,f.shape,f.dtype)}return d}),s(h.length>1?h:h[0],i,o))),this.state.activeTape.push(a)}keep(e){return e.kept=!0,e}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=Hi(e),n=new Set(t.map(i=>i.id));for(let i=0;i{!i.kept&&i.scopeId===s.id&&this.track(i)})}gradients(e,t,n,s=!1){if(A(t.length>0,()=>"gradients() received an empty list of xs."),n!=null&&n.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const i=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",e));A(i instanceof ee,()=>"The result y returned by f() must be a tensor.");const o=nk(this.state.activeTape,t,i);if(!s&&o.length===0&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const a={};a[i.id]=n==null?dk(i.shape):n,sk(a,o,h=>this.tidy(h),pk);const c=t.map(h=>a[h.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(h=>{for(const d of h.saved)d.dispose()}),this.state.activeTape=null),{value:i,grads:c}})}customGrad(e){return A(Rr(e),()=>"The f passed in customGrad(f) must be a function."),(...t)=>{A(t.every(i=>i instanceof ee),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let n;const s={};return t.forEach((i,o)=>{s[o]=i}),this.runKernelFunc((i,o)=>(n=e(...t,o),A(n.value instanceof ee,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),A(Rr(n.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),n.value),s,(i,o)=>{const a=n.gradFunc(i,o),c=Array.isArray(a)?a:[a];A(c.length===t.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),A(c.every(d=>d instanceof ee),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const h={};return c.forEach((d,m)=>{h[m]=()=>d}),h})}}readSync(e){const t=this.state.tensorInfo.get(e);return t.backend.readSync(e)}read(e){const t=this.state.tensorInfo.get(e);return t.backend.read(e)}async time(e){const t=jn(),n=await this.backend.time(e);return n.wallMs=jn()-t,n}track(e){return this.state.activeScope!=null&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new cT;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}eh.nextTensorId=0,eh.nextVariableId=0;function dk(e){const t=Mg(P(e),"float32");return G.makeTensor(t,e,"float32")}function lT(){const e=Nx();if(e._tfengine==null){const t=new vx(e);e._tfengine=new eh(t)}return YD(e._tfengine.ENV),ok(()=>e._tfengine),e._tfengine}const G=lT();function pk(e,t){const n={a:e,b:t};return G.runKernelFunc((s,i)=>{const o=s.add(e,t);return i([e,t]),o},n,null,wo)}function mk(){return typeof navigator!="undefined"&&navigator!=null}function hT(){if(mk()){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
)|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}return!1}function Uy(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var fk=Object.freeze({__proto__:null,isMobile:hT,isBrowser:Uy});const qi=oe();qi.registerFlag("DEBUG",()=>!1,e=>{e&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")}),qi.registerFlag("IS_BROWSER",()=>Uy()),qi.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined"),qi.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)),qi.registerFlag("PROD",()=>!1),qi.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>qi.getBool("DEBUG")),qi.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0),qi.registerFlag("IS_TEST",()=>!1);function Ii(e,t){let n=e;if(hn(e))return t==="string"?[]:[e.length];if(!Array.isArray(e))return[];const s=[];for(;Array.isArray(n)||hn(n)&&t!=="string";)s.push(n.length),n=n[0];return Array.isArray(e)&&oe().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&uT(e,s,[]),s}function uT(e,t,n){if(n=n||[],!Array.isArray(e)&&!hn(e)){A(t.length===0,()=>`Element arr[${n.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`);return}A(t.length>0,()=>`Element arr[${n.join("][")}] should be a primitive, but is an array of ${e.length} elements`),A(e.length===t[0],()=>`Element arr[${n.join("][")}] should have ${t[0]} elements, but has ${e.length} elements`);const s=t.slice(1);for(let i=0;i=0&&(i=s),dT(s,i,t,n),e==null||!hn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string"){const h=e==null?"null":e.constructor.name;throw new Error(`Argument '${t}' passed to '${n}' must be a Tensor or TensorLike, but got '${h}'`)}const o=Ii(e,i);!hn(e)&&!Array.isArray(e)&&(e=[e]);const a=!0,c=i!=="string"?Or(e,i):te(e,[],a);return G.makeTensor(c,o,i)}function th(e,t,n,s="numeric"){if(!Array.isArray(e))throw new Error(`Argument ${t} passed to ${n} must be a \`Tensor[]\` or \`TensorLike[]\``);const i=e;return i.map((o,a)=>W(o,`${t}[${a}]`,n),s)}const pT="__op";function z(e){const t=Object.keys(e);if(t.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let n=t[0];const s=e[n];n.endsWith("_")&&(n=n.substring(0,n.length-1)),n=n+pT;const i=(...o)=>{G.startScope(n);try{const a=s(...o);return bo(a)&&console.error("Cannot return a Promise inside of tidy."),G.endScope(a),a}catch(a){throw G.endScope(null),a}};return Object.defineProperty(i,"name",{value:n,configurable:!0}),i}function gk(e,t){const n=W(e,"real","complex"),s=W(t,"imag","complex");B(n.shape,s.shape,`real and imag shapes, ${n.shape} and ${s.shape}, must match in call to tf.complex().`);const i=a=>a.complex(n,s),o={real:n,imag:s};return G.runKernelFunc(i,o,null,rd)}const ji=z({complex_:gk});function Er(e,t,n,s){if(s==null&&(s=wa(e)),s==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!hn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(t!=null){zg(t);const i=P(t),o=P(n);A(i===o,()=>`Based on the provided shape, [${t}], the tensor should have ${i} values but has ${o}`);for(let a=0;a`Error creating a new Tensor. Inferred shape (${n}) does not match the provided shape (${t}). `)}}return!hn(e)&&!Array.isArray(e)&&(e=[e]),t=t||n,e=s!=="string"?Or(e,s):te(e,[],!0),G.makeTensor(e,t,s)}function sn(e,t,n){const s=Ii(e,n);return Er(e,t,s,n)}const By={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};const Md=4;async function My(e,t){const n=[],s=[],i=Array.isArray(e)?e.map(a=>a.name):Object.keys(e);for(let a=0;a{const b=await h.bytes(),w=b.reduce((v,N)=>v+N.length,0)+Md*b.length,L=new Uint8Array(w);let x=0;for(let v=0;v{if(t+=o.byteLength,n.push(o.byteLength===o.buffer.byteLength?o:new o.constructor(o)),!(o instanceof Float32Array||o instanceof Int32Array||o instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${o.constructor.name}`)});const s=new Uint8Array(t);let i=0;return n.forEach(o=>{s.set(new Uint8Array(o.buffer),i),i+=o.byteLength}),s.buffer}const Py=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function mT(e){return Py?Buffer.byteLength(e):new Blob([e]).size}function bk(e){if(Py)return Buffer.from(e).toString("base64");const t=new Uint8Array(e);let n="";for(let s=0,i=t.length;s{t+=i.byteLength});const n=new Uint8Array(t);let s=0;return e.forEach(i=>{n.set(new Uint8Array(i),s),s+=i.byteLength}),n.buffer}function fT(e){const t="/";for(e=e.trim();e.endsWith(t);)e=e.slice(0,e.length-1);const n=e.split(t);return n[n.length-1]}function nh(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:e.modelTopology==null?0:mT(JSON.stringify(e.modelTopology)),weightSpecsBytes:e.weightSpecs==null?0:mT(JSON.stringify(e.weightSpecs)),weightDataBytes:e.weightData==null?0:e.weightData.byteLength}}function Lk(){const e=n=>{let s=n<<13,i=0;for(;(s&8388608)===0;)i-=8388608,s<<=1;return s&=~8388608,i+=947912704,s|i},t=new Uint32Array(2048);t[0]=0;for(let n=1;n<1024;n++)t[n]=e(n);for(let n=1024;n<2048;n++)t[n]=939524096+(n-1024<<13);return t}function Sk(){const e=new Uint32Array(64);e[0]=0,e[31]=1199570944,e[32]=2147483648,e[63]=3347054592;for(let t=1;t<31;t++)e[t]=t<<23;for(let t=33;t<63;t++)e[t]=2147483648+(t-32<<23);return e}function Ik(){const e=new Uint32Array(64);for(let t=0;t<64;t++)e[t]=1024;return 
e[0]=e[32]=0,e}function xk(){const e=Lk(),t=Sk(),n=Ik();return s=>{const i=new ArrayBuffer(4*s.length),o=new Uint32Array(i);for(let a=0;a>10]+(c&1023)]+t[c>>10];o[a]=h}return new Float32Array(i)}}class en{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return en.instance==null&&(en.instance=new en),en.instance}static registerSaveRouter(e){en.getInstance().saveRouters.push(e)}static registerLoadRouter(e){en.getInstance().loadRouters.push(e)}static getSaveHandlers(e){return en.getHandlers(e,"save")}static getLoadHandlers(e,t){return en.getHandlers(e,"load",t)}static getHandlers(e,t,n){const s=[],i=t==="load"?en.getInstance().loadRouters:en.getInstance().saveRouters;return i.forEach(o=>{const a=o(e,n);a!==null&&s.push(a)}),s}}const Tk=e=>en.registerSaveRouter(e),Ak=e=>en.registerLoadRouter(e),zy=e=>en.getSaveHandlers(e),Vy=(e,t)=>en.getLoadHandlers(e,t);const Vd="tensorflowjs",Gy=1,Lo="models_store",Dr="model_info_store";async function bee(){const e=Yy();return new Promise((t,n)=>{const s=e.deleteDatabase(Vd);s.onsuccess=()=>t(),s.onerror=i=>n(i)})}function Yy(){if(!oe().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");const e=typeof window=="undefined"?self:window,t=e.indexedDB||e.mozIndexedDB||e.webkitIndexedDB||e.msIndexedDB||e.shimIndexedDB;if(t==null)throw new Error("The current browser does not appear to support IndexedDB.");return t}function Hy(e){const t=e.result;t.createObjectStore(Lo,{keyPath:"modelPath"}),t.createObjectStore(Dr,{keyPath:"modelPath"})}class So{constructor(e){if(this.indexedDB=Yy(),e==null||!e)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=e}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,e)}async load(){return this.databaseAction(this.modelPath)}databaseAction(e,t){return new Promise((n,s)=>{const i=this.indexedDB.open(Vd,Gy);i.onupgradeneeded=()=>Hy(i),i.onsuccess=()=>{const o=i.result;if(t==null){const a=o.transaction(Lo,"readonly"),c=a.objectStore(Lo),h=c.get(this.modelPath);h.onsuccess=()=>{if(h.result==null)return o.close(),s(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));n(h.result.modelArtifacts)},h.onerror=d=>(o.close(),s(h.error)),a.oncomplete=()=>o.close()}else{const a=nh(t),c=o.transaction(Dr,"readwrite");let h=c.objectStore(Dr);const d=h.put({modelPath:this.modelPath,modelArtifactsInfo:a});let m;d.onsuccess=()=>{m=o.transaction(Lo,"readwrite");const f=m.objectStore(Lo),b=f.put({modelPath:this.modelPath,modelArtifacts:t,modelArtifactsInfo:a});b.onsuccess=()=>n({modelArtifactsInfo:a}),b.onerror=w=>{h=c.objectStore(Dr);const L=h.delete(this.modelPath);L.onsuccess=()=>(o.close(),s(b.error)),L.onerror=x=>(o.close(),s(b.error))}},d.onerror=f=>(o.close(),s(d.error)),c.oncomplete=()=>{m==null?o.close():m.oncomplete=()=>o.close()}}},i.onerror=o=>s(i.error)})}}So.URL_SCHEME="indexeddb://";const gT=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(So.URL_SCHEME))?vk(e.slice(So.URL_SCHEME.length)):null;en.registerSaveRouter(gT),en.registerLoadRouter(gT);function vk(e){return new So(e)}function Nk(e){return e.startsWith(So.URL_SCHEME)?e.slice(So.URL_SCHEME.length):e}class Ck{constructor(){this.indexedDB=Yy()}async listModels(){return new Promise((e,t)=>{const 
n=this.indexedDB.open(Vd,Gy);n.onupgradeneeded=()=>Hy(n),n.onsuccess=()=>{const s=n.result,i=s.transaction(Dr,"readonly"),o=i.objectStore(Dr),a=o.getAll();a.onsuccess=()=>{const c={};for(const h of a.result)c[h.modelPath]=h.modelArtifactsInfo;e(c)},a.onerror=c=>(s.close(),t(a.error)),i.oncomplete=()=>s.close()},n.onerror=s=>t(n.error)})}async removeModel(e){return e=Nk(e),new Promise((t,n)=>{const s=this.indexedDB.open(Vd,Gy);s.onupgradeneeded=()=>Hy(s),s.onsuccess=()=>{const i=s.result,o=i.transaction(Dr,"readwrite"),a=o.objectStore(Dr),c=a.get(e);let h;c.onsuccess=()=>{if(c.result==null)return i.close(),n(new Error(`Cannot find model with path '${e}' in IndexedDB.`));{const d=a.delete(e),m=()=>{h=i.transaction(Lo,"readwrite");const f=h.objectStore(Lo),b=f.delete(e);b.onsuccess=()=>t(c.result.modelArtifactsInfo),b.onerror=w=>n(c.error)};d.onsuccess=m,d.onerror=f=>(m(),i.close(),n(c.error))}},c.onerror=d=>(i.close(),n(c.error)),o.oncomplete=()=>{h==null?i.close():h.oncomplete=()=>i.close()}},s.onerror=i=>n(s.error)})}}const xi="/",Io="tensorflowjs_models",yT="info",Rk="model_topology",Ok="weight_specs",Ek="weight_data",Dk="model_metadata";function wee(){if(!oe().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("purgeLocalStorageModels() cannot proceed because local storage is unavailable in the current environment.");const e=window.localStorage,t=[];for(let n=0;ni.length){e.removeItem(s);const o=wT(s);t.indexOf(o)===-1&&t.push(o)}}return t}function bT(e){return{info:[Io,e,yT].join(xi),topology:[Io,e,Rk].join(xi),weightSpecs:[Io,e,Ok].join(xi),weightData:[Io,e,Ek].join(xi),modelMetadata:[Io,e,Dk].join(xi)}}function wT(e){const t=e.split(xi);if(t.length<3)throw new Error(`Invalid key format: ${e}`);return t.slice(1,t.length-1).join(xi)}function kk(e){return e.startsWith(xo.URL_SCHEME)?e.slice(xo.URL_SCHEME.length):e}class xo{constructor(e){if(!oe().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,e==null||!e)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=e,this.keys=bT(this.modelPath)}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{const t=JSON.stringify(e.modelTopology),n=JSON.stringify(e.weightSpecs),s=nh(e);try{return this.LS.setItem(this.keys.info,JSON.stringify(s)),this.LS.setItem(this.keys.topology,t),this.LS.setItem(this.keys.weightSpecs,n),this.LS.setItem(this.keys.weightData,bk(e.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata})),{modelArtifactsInfo:s}}catch(i){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${s.modelTopologyBytes}, weightSpecsBytes=${s.weightSpecsBytes}, weightDataBytes=${s.weightDataBytes}.`)}}}async load(){const e=JSON.parse(this.LS.getItem(this.keys.info));if(e==null)throw new Error(`In local storage, there is no model with name 
'${this.modelPath}'`);if(e.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");const t={},n=JSON.parse(this.LS.getItem(this.keys.topology));if(n==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);t.modelTopology=n;const s=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(s==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);t.weightSpecs=s;const i=this.LS.getItem(this.keys.modelMetadata);if(i!=null){const a=JSON.parse(i);t.format=a.format,t.generatedBy=a.generatedBy,t.convertedBy=a.convertedBy,t.userDefinedMetadata=a.userDefinedMetadata}const o=this.LS.getItem(this.keys.weightData);if(o==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return t.weightData=wk(o),t}}xo.URL_SCHEME="localstorage://";const LT=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(xo.URL_SCHEME))?Fk(e.slice(xo.URL_SCHEME.length)):null;en.registerSaveRouter(LT),en.registerLoadRouter(LT);function Fk(e){return new xo(e)}class _k{constructor(){A(oe().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),A(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){const e={},t=Io+xi,n=xi+yT;for(let s=0;s"scheme must not be undefined or null."),e.endsWith(Ea)&&(e=e.slice(0,e.indexOf(Ea))),A(e.length>0,()=>"scheme must not be an empty string.");const n=Ss.getInstance();A(n.managers[e]==null,()=>`A model store manager is already registered for scheme '${e}'.`),n.managers[e]=t}static getManager(e){const t=this.getInstance().managers[e];if(t==null)throw new Error(`Cannot find model manager for scheme '${e}'`);return t}static getSchemes(){return Object.keys(this.getInstance().managers)}}function Gd(e){if(e.indexOf(Ea)===-1)throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${Ss.getSchemes().join(",")}`);return{scheme:e.split(Ea)[0],path:e.split(Ea)[1]}}async function ST(e,t,n=!1){A(e!==t,()=>`Old path and new path are the same: '${e}'`);const s=en.getLoadHandlers(e);A(s.length>0,()=>`Copying failed because no load handler is found for source URL ${e}.`),A(s.length<2,()=>`Copying failed because more than one (${s.length}) load handlers for source URL ${e}.`);const i=s[0],o=en.getSaveHandlers(t);A(o.length>0,()=>`Copying failed because no save handler is found for destination URL ${t}.`),A(o.length<2,()=>`Copying failed because more than one (${s.length}) save handlers for destination URL ${t}.`);const a=o[0],c=Gd(e).scheme,h=Gd(e).path,d=c===Gd(e).scheme,m=await i.load();n&&d&&await Ss.getManager(c).removeModel(h);const f=await a.save(m);return n&&!d&&await Ss.getManager(c).removeModel(h),f.modelArtifactsInfo}async function Wk(){const e=Ss.getSchemes(),t={};for(const n of e){const s=await Ss.getManager(n).listModels();for(const i in s){const o=n+Ea+i;t[o]=s[i]}}return t}async function $k(e){const t=Gd(e),n=Ss.getManager(t.scheme);return n.removeModel(t.path)}async function Uk(e,t){const n=!1;return ST(e,t,n)}async function Bk(e,t){const n=!0;return ST(e,t,n)}class Mk{fetch(e,t){return fetch(e,t)}now(){return performance.now()}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${t}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(e)}decode(e,t){return new TextDecoder(t).decode(e)}}if(oe().get("IS_BROWSER")){oe().setPlatform("browser",new Mk);try{Ss.registerManager(xo.URL_SCHEME,new _k)}catch(e){}try{Ss.registerManager(So.URL_SCHEME,new Ck)}catch(e){}}const Pk={importFetch:()=>ZC()};let Da;function Lee(){Da=null}function See(e){Da=e}function Iee(){return Da}class zk{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(e,t){return oe().global.fetch!=null?oe().global.fetch(e,t):(Da==null&&(Da=Pk.importFetch()),Da(e,t))}now(){const e=process.hrtime();return e[0]*1e3+e[1]/1e6}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${t}`);return this.textEncoder.encode(e)}decode(e,t){return e.length===0?"":new this.util.TextDecoder(t).decode(e)}}oe().get("IS_NODE")&&oe().setPlatform("node",new zk);function wt(e,t="float32",n){return t=t||"float32",zg(e),new an(e,t,n)}function Vk(e,t){const n=W(e,"x","cast");if(!Cr(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if(t==="string"&&n.dtype!=="string"||t!=="string"&&n.dtype==="string")throw new Error("Only strings can be casted to strings");const s={x:n},i={dtype:t};return G.runKernelFunc(o=>o.cast(n,t),s,null,Sa,i)}const Ae=z({cast_:Vk});function Gk(e){const t=W(e,"x","clone",null),n=()=>G.makeTensorFromDataId(t.dataId,t.shape,t.dtype),s={x:t};return G.runKernelFunc(n,s,null,xl)}const kr=z({clone_:Gk});function IT(e,t=!1){console.log(e.toString(t))}lT();const Yk={buffer:wt,cast:Ae,clone:kr,print:IT};ak(Yk);const Hk="model",qk=".json",jk=".weights.bin";function xT(e){return new Promise(t=>setTimeout(t)).then(e)}class ka{constructor(e){if(!oe().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");e.startsWith(ka.URL_SCHEME)&&(e=e.slice(ka.URL_SCHEME.length)),(e==null||e.length===0)&&(e=Hk),this.modelTopologyFileName=e+qk,this.weightDataFileName=e+jk}async save(e){if(typeof document=="undefined")throw new Error("Browser downloads are not 
supported in this environment since `document` is not present");const t=window.URL.createObjectURL(new Blob([e.weightData],{type:"application/octet-stream"}));if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{const n=[{paths:["./"+this.weightDataFileName],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n},i=window.URL.createObjectURL(new Blob([JSON.stringify(s)],{type:"application/json"})),o=this.jsonAnchor==null?document.createElement("a"):this.jsonAnchor;if(o.download=this.modelTopologyFileName,o.href=i,await xT(()=>o.dispatchEvent(new MouseEvent("click"))),e.weightData!=null){const a=this.weightDataAnchor==null?document.createElement("a"):this.weightDataAnchor;a.download=this.weightDataFileName,a.href=t,await xT(()=>a.dispatchEvent(new MouseEvent("click")))}return{modelArtifactsInfo:nh(e)}}}}ka.URL_SCHEME="downloads://";class Kk{constructor(e){if(e==null||e.length<1)throw new Error(`When calling browserFiles, at least 1 file is required, but received ${e}`);this.files=e}async load(){const e=this.files[0],t=this.files.slice(1);return new Promise((n,s)=>{const i=new FileReader;i.onload=o=>{const a=JSON.parse(o.target.result),c=a.modelTopology;if(c==null){s(new Error(`modelTopology field is missing from file ${e.name}`));return}t.length===0&&n({modelTopology:c});const h=a.weightsManifest;if(h==null){s(new Error(`weightManifest field is missing from file ${e.name}`));return}let d;try{d=this.checkManifestAndWeightFiles(h,t)}catch(w){s(w);return}const m=[],f=[],b=[];h.forEach(w=>{w.paths.forEach(L=>{f.push(L),b.push(null)}),m.push(...w.weights)}),h.forEach(w=>{w.paths.forEach(L=>{const x=new FileReader;x.onload=v=>{const N=v.target.result,O=f.indexOf(L);b[O]=N,b.indexOf(null)===-1&&n({modelTopology:c,weightSpecs:m,weightData:zd(b),format:a.format,generatedBy:a.generatedBy,convertedBy:a.convertedBy,userDefinedMetadata:a.userDefinedMetadata})},x.onerror=v=>s(`Failed to weights data from file of path '${L}'.`),x.readAsArrayBuffer(d[L])})})},i.onerror=o=>s(`Failed to read model topology and weights manifest JSON from file '${e.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`),i.readAsText(e)})}checkManifestAndWeightFiles(e,t){const n=[],s=t.map(o=>fT(o.name)),i={};for(const o of e)o.paths.forEach(a=>{const c=fT(a);if(n.indexOf(c)!==-1)throw new Error(`Duplicate file basename found in weights manifest: '${c}'`);if(n.push(c),s.indexOf(c)===-1)throw new Error(`Weight file with basename '${c}' is not provided.`);i[a]=t[s.indexOf(c)]});if(n.length!==t.length)throw new Error(`Mismatch in the number of files in weights manifest (${n.length}) and the number of weight files provided (${t.length}).`);return i}}const Xk=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(ka.URL_SCHEME))?Jk(e.slice(ka.URL_SCHEME.length)):null;en.registerSaveRouter(Xk);function Jk(e="model"){return new ka(e)}function Zk(e){return new Kk(e)}function TT(e,t,n,s){a(e),n=n==null?0:n,s=s==null?1:s,c(n,s);let i=0;const o=h=>(h.then(d=>{const m=n+ ++i/e.length*(s-n);return t(m),d}),h);function a(h){A(h!=null&&Array.isArray(h)&&h.length>0,()=>"promises must be a none empty array")}function c(h,d){A(h>=0&&h<=1,()=>`Progress fraction must be in range [0, 1], but got startFraction ${h}`),A(d>=0&&d<=1,()=>`Progress fraction must be in range [0, 1], but got endFraction ${d}`),A(d>=h,()=>`startFraction must be no more than endFraction, but got startFraction ${h} and endFraction ${d}`)}return Promise.all(e.map(o))}async function AT(e,t){t==null&&(t={});const n=t.fetchFunc==null?oe().platform.fetch:t.fetchFunc,s=e.map(f=>n(f,t.requestInit,{isBinary:!0})),i=0,o=.5,a=t.onProgress==null?await Promise.all(s):await TT(s,t.onProgress,i,o),c=a.map(f=>f.arrayBuffer()),h=.5,d=1,m=t.onProgress==null?await Promise.all(c):await TT(c,t.onProgress,h,d);return m}async function vT(e,t="",n,s){const i=a=>AT(a,{requestInit:s}),o=NT(i);return o(e,t,n)}function NT(e){return async(t,n="",s)=>{const i=t.map(()=>!1),o={},a=s!=null?s.map(()=>!1):[],c=[];if(t.forEach((w,L)=>{let x=0;w.weights.forEach(v=>{const N="quantization"in v?v.quantization.dtype:v.dtype,O=By[N]*P(v.shape),E=()=>{i[L]=!0,o[L]==null&&(o[L]=[]),o[L].push({manifestEntry:v,groupOffset:x,sizeBytes:O})};s!=null?s.forEach((k,F)=>{k===v.name&&(E(),a[F]=!0)}):E(),c.push(v.name),x+=O})}),!a.every(w=>w)){const w=s.filter((L,x)=>!a[x]);throw new Error(`Could not find weights in manifest with names: ${w.join(", ")}. 
+Manifest JSON has weights with names: ${c.join(", ")}.`)}const h=i.reduce((w,L,x)=>(L&&w.push(x),w),[]),d=[];h.forEach(w=>{t[w].paths.forEach(L=>{const x=n+(n.endsWith("/")?"":"/")+L;d.push(x)})});const m=await e(d),f={};let b=0;return h.forEach(w=>{const L=t[w].paths.length;let x=0;for(let k=0;k{const F=v.slice(k.groupOffset,k.groupOffset+k.sizeBytes),U=Pd(F,[k.manifestEntry]);for(const $ in U)f[$]=U[$]}),b+=L}),f}}const Qk="application/octet-stream",eF="application/json";class qy{constructor(e,t){if(this.DEFAULT_METHOD="POST",t==null&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,t.fetchFunc!=null?(A(typeof t.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=t.fetchFunc):this.fetch=oe().platform.fetch,A(e!=null&&e.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(e)&&A(e.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`),this.path=e,t.requestInit!=null&&t.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata,weightsManifest:n};t.body.append("model.json",new Blob([JSON.stringify(s)],{type:eF}),"model.json"),e.weightData!=null&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:Qk}),"model.weights.bin");const i=await this.fetch(this.path,t);if(i.ok)return{modelArtifactsInfo:nh(e),responses:[i]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${i.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(b){let w=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?w+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":w+=" Please make sure the server is serving valid JSON for this request.",new Error(w)}const n=t.modelTopology,s=t.weightsManifest,i=t.generatedBy,o=t.convertedBy,a=t.format,c=t.userDefinedMetadata;if(n==null&&s==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let h,d;if(s!=null){const b=await this.loadWeights(s);[h,d]=b}const m={modelTopology:n,weightSpecs:h,weightData:d,userDefinedMetadata:c,generatedBy:i,convertedBy:o,format:a},f=t.modelInitializer;return f&&(m.modelInitializer=f),m}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,s]=tF(t),i=this.weightPathPrefix||n,o=[];for(const d of e)o.push(...d.weights);const a=[],c=[];for(const d of e)for(const m of d.paths)this.weightUrlConverter!=null?c.push(this.weightUrlConverter(m)):a.push(i+m+s);this.weightUrlConverter&&a.push(...await Promise.all(c));const h=await AT(a,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[o,zd(h)]}}qy.URL_SCHEME_REGEX=/^https?:\/\//;function tF(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?"),s=e.substring(0,t),i=n>t?e.substring(n):"";return[s+"/",i]}function jy(e){return e.match(qy.URL_SCHEME_REGEX)!=null}const CT=(e,t)=>{if(typeof fetch=="undefined"&&(t==null||t.fetchFunc==null))return null;{let n=!0;if(Array.isArray(e)?n=e.every(s=>jy(s)):n=jy(e),n)return Ky(e,t)}return null};en.registerSaveRouter(CT),en.registerLoadRouter(CT);function Ky(e,t){return new qy(e,t)}function Yd(e,t){return Ky(e,t)}class Xy{constructor(e){this.modelArtifacts=e}async load(){return this.modelArtifacts}}class nF{constructor(e){this.saveHandler=e}async save(e){return this.saveHandler(e)}}function sF(e,t,n,s){if(arguments.length===1){const i=e.modelTopology!=null||e.weightSpecs!=null;return i?new Xy(e):(console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Xy({modelTopology:e}))}else return console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Xy({modelTopology:e,weightSpecs:t,weightData:n,trainingConfig:s})}function iF(e){return new nF(e)}var rF=Object.freeze({__proto__:null,browserFiles:Zk,browserHTTPRequest:Yd,concatenateArrayBuffers:zd,decodeWeights:Pd,encodeWeights:My,fromMemory:sF,getLoadHandlers:Vy,getModelArtifactsInfoForJSON:nh,getSaveHandlers:zy,http:Ky,isHTTPScheme:jy,loadWeights:vT,registerLoadRouter:Ak,registerSaveRouter:Tk,weightsLoaderFactory:NT,withSaveHandler:iF,copyModel:Uk,listModels:Wk,moveModel:Bk,removeModel:$k});function oF(e,t){const n=W(e,"x","reshape",null),s={x:n},i={shape:t},o=(a,c)=>(t=Vt(t,n.size),A(n.size===P(t),()=>"new shape and old shape must have the same number of elements."),c([n]),a.reshape(n,t));return G.runKernelFunc(o,s,null,_l,i)}const K=z({reshape_:oF});function aF(e,t,n=!1,s=!1){let i=W(e,"a","matMul"),o=W(t,"b","matMul");[i,o]=Gt(i,o);const a=(d,m)=>{m([i,o]);const f=n?i.shape[i.rank-2]:i.shape[i.rank-1],b=s?o.shape[o.rank-1]:o.shape[o.rank-2],w=n?i.shape[i.rank-1]:i.shape[i.rank-2],L=s?o.shape[o.rank-2]:o.shape[o.rank-1],x=i.shape.slice(0,-2),v=o.shape.slice(0,-2),N=P(x),O=P(v),E=N===O||N===1||O===1;A(i.rank>=2&&o.rank>=2&&E,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${x}) and (${v}).`),A(f===b,()=>`Error in matMul: inner shapes (${f}) and (${b}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${n} and transposeB=${s} must match.`);const k=N>O?x:v,F=k.concat([w,L]),U=n?K(i,[N,f,w]):K(i,[N,w,f]),$=s?K(o,[O,L,b]):K(o,[O,b,L]),Y=d.batchMatMul(U,$,n,s);return K(Y,F)},c={a:i,b:o},h={transposeA:n,transposeB:s};return G.runKernelFunc(a,c,null,id,h)}const ct=z({matMul_:aF});function cF(e,t,n=1,s=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);const i=W(e,"indices","oneHot","int32"),o=[...i.shape,t],a=(d,m)=>(m([i]),K(d.oneHot(K(i,[i.size]),t,n,s),o)),c={indices:i},h={depth:t,onValue:n,offValue:s};return G.runKernelFunc(a,c,null,yy,h)}const To=z({oneHot_:cF});function lF(e,t){const n=W(e,"x","transpose");if(t==null&&(t=n.shape.map((o,a)=>a).reverse()),A(n.rank===t.length,()=>`Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`),t.forEach(o=>{A(o>=0&&o`All entries in 'perm' must be between 0 and ${n.rank-1} but got ${t}`)}),n.rank<=1)return n.clone();const s={x:n},i={perm:t};return G.runKernelFunc(o=>o.transpose(n,t),s,null,Hl,i)}const Ye=z({transpose_:lF});function hF(e,t,n){const s=W(e,"labels","confusionMatrix"),i=W(t,"predictions","confusionMatrix");A(n==null||n>0&&Number.isInteger(n),()=>`If provided, numClasses must be a positive integer, but got ${n}`),A(s.rank===1,()=>`Expected the rank of labels to be 1, but got ${s.rank}`),A(i.rank===1,()=>`Expected the rank of predictions to be 1, but got ${i.rank}`),A(s.shape[0]===i.shape[0],()=>`Mismatch in the number of examples: ${s.shape[0]} vs. ${i.shape[0]}. 
Labels and predictions should have the same number of elements.`),A(n>0&&Number.isInteger(n),()=>`numClasses is required to be a positive integer, but got ${n}`);const o=To(Ae(s,"int32"),n),a=To(Ae(i,"int32"),n),c=Ye(o),h=ct(c,a);return Ae(h,"int32")}const uF=z({confusionMatrix_:hF});var dF=Object.freeze({__proto__:null,confusionMatrix:uF});function RT(e,t,n){if(ne(e),t!=null&&t.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const s=Ii(e,n);if(s.length!==3&&s.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}let Fa;function pF(e,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(e==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let n=!1,s=!1,i=!1,o=!1,a=!1;if(e.data instanceof Uint8Array)n=!0;else if(typeof ImageData!="undefined"&&e instanceof ImageData)s=!0;else if(typeof HTMLVideoElement!="undefined"&&e instanceof HTMLVideoElement)i=!0;else if(typeof HTMLImageElement!="undefined"&&e instanceof HTMLImageElement)o=!0;else if(e.getContext!=null)a=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${e.constructor.name}`);if(i){const w=2;if(i&&e.readyState element.")}const c=Oy(Rd,G.backendName);if(c!=null){const w={pixels:e},L={numChannels:t};return G.runKernel(Rd,w,L)}const[h,d]=i?[e.videoWidth,e.videoHeight]:[e.width,e.height];let m;a?m=e.getContext("2d").getImageData(0,0,h,d).data:s||n?m=e.data:(o||i)&&(Fa==null&&(Fa=document.createElement("canvas").getContext("2d")),Fa.canvas.width=h,Fa.canvas.height=d,Fa.drawImage(e,0,0,h,d),m=Fa.getImageData(0,0,h,d).data);let f;if(t===4)f=new Int32Array(m);else{const w=h*d;f=new Int32Array(w*t);for(let L=0;L4||o===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${o}`);if(n.dtype!=="float32"&&n.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${n.dtype}. Please use float32 or int32 tensors.`);const a=await n.data(),c=n.dtype==="float32"?255:1,h=new Uint8ClampedArray(i*s*4);for(let d=0;d1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${w}.`)}else if(n.dtype==="int32"&&(w<0||w>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${w}.`);o===1?(m[0]=w*c,m[1]=w*c,m[2]=w*c):m[b]=w*c}const f=d*4;h[f+0]=Math.round(m[0]),h[f+1]=Math.round(m[1]),h[f+2]=Math.round(m[2]),h[f+3]=Math.round(m[3])}if(t!=null){t.width=i,t.height=s;const d=t.getContext("2d"),m=new ImageData(h,i,s);d.putImageData(m,0,0)}return n!==e&&n.dispose(),h}const OT=z({fromPixels_:pF});var fF=Object.freeze({__proto__:null,toPixels:mF,fromPixels:OT});function Hd(e,t){if(e.rank<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${e.rank}.`);if(t.rank<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(t.dtype!=="int32")throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[t.rank-1]>e.rank)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[t.rank-1]} vs. 
${e.rank}`);if(e.size===0)throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${e.shape}.`);const n=t.shape,s=n[n.length-1];let i=1;for(let d=0;dd/c),1].slice(0,s);return[a,i,c,h]}var gF=Object.freeze({__proto__:null,prepareAndValidate:Hd});function Jy(e,t,n){const s=t.rank>1?t.shape[t.rank-1]:1,i=t.rank>1?t.rank-1:1,o=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${n.shape}, indices.shape: ${t.shape}, shape: ${e}, sliceDim: ${s}, and batchDim: ${i}.`;if(n.rank1?t.shape[s-1]:1,o=n.length;let a=1;for(let f=i;f`Error in slice${s}D: Length of begin ${t} must match the rank of the array (${s}).`),A(s===n.length,()=>`Error in slice${s}D: Length of size ${n} must match the rank of the array (${s}).`);for(let i=0;i`Error in slice${s}D: begin[${i}] + size[${i}] (${t[i]+n[i]}) would overflow input.shape[${i}] (${e.shape[i]})`)}function qd(e){const t=[];let n=0;for(;e>0;)e&1&&t.push(n),e/=2,n++;return t}function jd(e,t,n){const s=[];for(let i=0;i0){const w=t[0],L=n+1;m=_T(a,w,L,s,e),f=WT(c,w,L,i,e),b=ET(o,w,L,e)}else for(let w=0;w-1)o[c]=0;else{const h=DT(t,n,c);let d=s[h];e&1<-1)o[c]=Number.MAX_SAFE_INTEGER;else{const h=DT(t,n,c);let d=s[h];e&1<0?a=Number.MIN_SAFE_INTEGER:a=Number.MAX_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),a=S(0,a,h-1),a}function BT(e,t,n,s,i,o){let a=t[i];const c=n[i]||1;(e&1<0?a=Number.MAX_SAFE_INTEGER:a=Number.MIN_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),c>0?a=S(0,a,h):a=S(-1,a,h-1),a}function eb(e,t,n){let s=n.length;for(let i=0;i1){s=i;break}for(let i=s+1;i0||n[i]!==e[i])return!1;return!0}function tb(e,t){let n=e.length>0?e[e.length-1]:1;for(let s=0;s{A(a!==-1,()=>"slice() does not support negative begin indexing.")});let o;return n==null?o=new Array(i).fill(-1):typeof n=="number"?o=[n,...new Array(i-1).fill(-1)]:n.lengtha>=0?a:(A(a===-1,()=>`Negative size values should be exactly -1 but got ${a} for the slice() size at index ${c}.`),e.shape[c]-s[c])),[s,o]}var MT=Object.freeze({__proto__:null,assertParamsValid:Qy,maskToAxes:qd,computeOutShape:jd,stridesWithElidedDims:ET,getNormalizedAxes:FT,startIndicesWithElidedDims:_T,stopIndicesWithElidedDims:WT,stridesForAxis:$T,startForAxis:UT,stopForAxis:BT,isSliceContinous:eb,computeFlatOffset:tb,parseSliceParams:Kd});class Ao{getClassName(){return this.constructor.className}static fromConfig(e,t){return new e(t)}}class Ws{constructor(){this.classNameMap={}}static getMap(){return Ws.instance==null&&(Ws.instance=new Ws),Ws.instance}static register(e){Ws.getMap().classNameMap[e.className]=[e,e.fromConfig]}}function fe(e){A(e.className!=null,()=>"Class being registered does not have the static className property defined."),A(typeof e.className=="string",()=>"className is required to be a string, but got type "+typeof e.className),A(e.className.length>0,()=>"Class being registered has an empty-string as its className, which is disallowed."),Ws.register(e)}var bF=Object.freeze({__proto__:null,Serializable:Ao,SerializationMap:Ws,registerClass:fe});const wF=.001,PT=.1;function LF(e,t,n){return n==null&&(n=Xd()),nb(e,t,(s,i)=>ib(s,i,n))}function Xd(){return G.backend.floatPrecision()===32?wF:PT}function nb(e,t,n){let s=!0;if((hn(e)||hn(t))&&(s=!1),hn(e)&&hn(t)&&(s=!0),s){const a=e.constructor.name,c=t.constructor.name;if(a!==c)throw new Error(`Arrays are of different type. Actual: ${a}. Expected: ${c}`)}if(Array.isArray(e)&&Array.isArray(t)){const a=Ii(e),c=Ii(t);if(!ae(a,c))throw new Error(`Arrays have different shapes. Actual: [${a}]. 
Expected: [${c}]`)}const i=hn(e)?e:te(e),o=hn(t)?t:te(t);if(i.length!==o.length)throw new Error(`Arrays have different lengths actual: ${i.length} vs expected: ${o.length}. Actual: ${i}. Expected: ${o}.`);for(let a=0;at.fail(),()=>t())}function Gk(e,t){const n=typeof t=="string"||typeof t=="number"||typeof t=="boolean"?[t]:t;return Ir(e)||Ir(e[0])||Ir(t)||Ir(t[0])?ky(e,n,(s,i)=>s==i):ky(e,t,(s,i)=>_y(s,i,0))}function Fy(e,t,n){if(n==null&&(n=Ld()),!_y(e,t,n))throw new Error(`Numbers differ: actual === ${e}, expected === ${t}`)}function _y(e,t,n){return!isFinite(e)&&!isFinite(t)?!0:!(isNaN(e)||isNaN(t)||Math.abs(e-t)>n)}function Vk(e,t,n){for(let s=0;sn)throw new Error(`Value out of range:${e[s]} low: ${t}, high: ${n}`)}function Yk(e,t){expect(new Float32Array(e)).toEqual(new Float32Array(t))}var Hk=Object.freeze({__proto__:null,TEST_EPSILON_FLOAT16:IT,expectArraysClose:Pk,testEpsilon:Ld,expectPromiseToFail:zk,expectArraysEqual:Gk,expectNumbersClose:Fy,expectValuesInRange:Vk,expectArrayBuffersEqual:Yk});const xT="2.6.0";function qk(){C().set("PROD",!0)}function jk(){C().set("DEBUG",!0)}function Kk(){C().set("DEPRECATION_WARNINGS_ENABLED",!1),console.warn("TensorFlow.js deprecation warnings have been disabled.")}function nn(e){C().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(e+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}RD(nn);function Xk(){V.disposeVariables()}function Fs(){return V}function Sd(){return V.memory()}function Jk(e){return V.profile(e)}function ee(e,t){return V.tidy(e,t)}function He(e){const t=Hi(e);t.forEach(n=>n.dispose())}function Nn(e){return V.keep(e)}function Zk(e){return V.time(e)}function TT(e){return V.setBackend(e)}function Qk(){return V.ready()}function eF(){return V.backendName}function tF(e){V.removeBackend(e)}function nF(e){return V.findBackend(e)}function sF(e){return V.findBackendFactory(e)}function Wy(e,t,n=1){return V.registerBackend(e,t,n)}function AT(){return V.backend}function iF(e,t){C().setPlatform(e,t)}function rF(e,t){let n=W(e,"a","add"),s=W(t,"b","add");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.add(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,Te)}const be=P({add_:rF});function oF(e,t){let n=W(e,"a","floorDiv"),s=W(t,"b","floorDiv");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.floorDiv(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,yg)}const Id=P({floorDiv_:oF});function aF(e,t){let n=W(e,"a","div"),s=W(t,"b","div");if([n,s]=Bt(n,s),n.dtype==="int32"&&s.dtype==="int32")return Id(n,s);const i=(c,h)=>{const d=c.realDivide(n,s);return h([n,s]),d},o={a:n,b:s},a={};return V.runKernelFunc(i,o,null,pa,a)}const _e=P({div_:aF});function cF(e,t){let n=W(e,"a","mul"),s=W(t,"b","mul");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.multiply(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,fl)}const X=P({mul_:cF});function lF(e){const t=W(e,"x","abs"),n={x:t};return V.runKernelFunc((s,i)=>(i([t]),t.dtype==="complex64"?s.complexAbs(t):s.abs(t)),n,null,ge)}const sn=P({abs_:lF});function hF(e){const t=W(e,"x","acos"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.acos(t);return i([t]),o},n,null,fe)}const $y=P({acos_:hF});function uF(e){const t=W(e,"x","acosh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.acosh(t);return i([t]),o},n,null,Ae)}const Uy=P({acosh_:uF});function dF(e){k(Array.isArray(e),()=>"The argument passed to tf.addN() must be a list of tensors"),k(e.length>=1,()=>`Must pass at least one tensor to tf.addN(), but got ${e.length}`);const 
t=e.map((o,a)=>W(o,`tensors${a}`,"addN")),n=t[0];t.forEach(o=>{if(o.dtype!==n.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")}),t.forEach(o=>{if(!ot(o.shape,n.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")});const s=(o,a)=>{const c=o.addN(t);return a(t),c},i=t;return V.runKernelFunc(s,i,null,Ve)}const vT=P({addN_:dF});function By(e,t){for(let n=0;ne[o]);return[n,i]}function Rn(e,t){const n=t.map(s=>1);return NT(e,n,t)}function es(e,t,n){k(By(t,n),()=>`${e} supports only inner-most axes for now. Got axes ${t} and rank-${n} input.`)}function kn(e,t){if(By(e,t))return null;const n=[];for(let s=0;sn.push(s)),n}function Ml(e){return e.map((t,n)=>[n,t]).sort((t,n)=>t[1]-n[1]).map(t=>t[0])}function ws(e,t){const n=[];for(let s=t-e;s{const h=ft(t,s.shape);let d=h;const m=kn(d,s.rank);m!=null&&(s=Me(s,m),d=ws(d.length,s.rank));const y=c.all(s,d);if(n){const b=Rn(y.shape,h);return K(y,b)}return y},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,rt,a)}const xd=P({all_:pF});function mF(e,t=null,n=!1){let s=W(e,"x","any","bool");const i=c=>{const h=ft(t,s.shape);let d=h;const m=kn(d,s.rank);m!=null&&(s=Me(s,m),d=ws(d.length,s.rank));const y=c.any(s,d);if(n){const b=Rn(y.shape,h);return K(y,b)}return y},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,vt,a)}const Pl=P({any_:mF});function fF(e,t=0){let n=W(e,"x","argMax");const s=(a,c)=>{c([n]);let h=ft(t,n.shape);const d=kn(h,n.rank);return d!=null&&(n=Me(n,d),h=ws(h.length,n.rank)),a.argMax(n,h[0])},i={x:n},o={axis:t};return V.runKernelFunc(s,i,null,$t,o)}const zl=P({argMax_:fF});function gF(e,t=0){let n=W(e,"x","argMin");const s=(a,c)=>{c([n]),t==null&&(t=0);let h=ft(t,n.shape);const d=kn(h,n.rank);return d!=null&&(n=Me(n,d),h=ws(h.length,n.rank)),a.argMin(n,h[0])},i={x:n},o={axis:t};return V.runKernelFunc(s,i,null,Kt,o)}const My=P({argMin_:gF});function yF(e){const t=W(e,"x","asin"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.asin(t);return i([t]),o},n,null,Dn)}const Py=P({asin_:yF});function bF(e){const t=W(e,"x","asinh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.asinh(t);return i([t]),o},n,null,Tn)}const zy=P({asinh_:bF});function wF(e){const t=W(e,"x","atan"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.atan(t);return i([t]),o},n,null,An)}const Gy=P({atan_:wF});function LF(e,t){let n=W(e,"a","atan2"),s=W(t,"b","atan2");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.atan2(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,Li)}const Vy=P({atan2_:LF});function SF(e){const t=W(e,"x","atanh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.atanh(t);return i([t]),o},n,null,Ks)}const Yy=P({atanh_:SF});function Td(e,t,n,s,i="NHWC",o){const a=e[3],c=[...t,a],h=Yl(i);return Ai(e,c,n,o,s,null,null,h)}function Fn(e,t,n,s,i,o,a="channelsLast"){const[c,h]=Ad(t);let d;if(a==="channelsLast")d=[c,h,e[3],e[3]];else if(a==="channelsFirst")d=[c,h,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return Ai(e,d,n,s,i,o,!1,a)}function Gl(e,t,n,s,i,o,a="NDHWC"){const[c,h,d]=qy(t);let m,y;if(a==="NDHWC")y="channelsLast",m=[c,h,d,e[4],e[4]];else if(a==="NCDHW")y="channelsFirst",m=[c,h,d,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return Vl(e,m,n,s,i,!1,y,o)}function Ai(e,t,n,s,i,o,a=!1,c="channelsLast"){let[h,d,m,y]=[-1,-1,-1,-1];if(c==="channelsLast")[h,d,m,y]=e;else if(c==="channelsFirst")[h,y,d,m]=e;else throw new Error(`Unknown dataFormat 
${c}`);const[b,w,,L]=t,[T,A]=Ad(n),[N,E]=Ad(s),D=Na(b,N),F=Na(w,E),{padInfo:_,outHeight:B,outWidth:$}=TF(i,d,m,T,A,D,F,o,c),H=a?L*y:L;let q;return c==="channelsFirst"?q=[h,H,B,$]:c==="channelsLast"&&(q=[h,B,$,H]),{batchSize:h,dataFormat:c,inHeight:d,inWidth:m,inChannels:y,outHeight:B,outWidth:$,outChannels:H,padInfo:_,strideHeight:T,strideWidth:A,filterHeight:b,filterWidth:w,effectiveFilterHeight:D,effectiveFilterWidth:F,dilationHeight:N,dilationWidth:E,inShape:e,outShape:q,filterShape:t}}function Vl(e,t,n,s,i,o=!1,a="channelsLast",c){let[h,d,m,y,b]=[-1,-1,-1,-1,-1];if(a==="channelsLast")[h,d,m,y,b]=e;else if(a==="channelsFirst")[h,b,d,m,y]=e;else throw new Error(`Unknown dataFormat ${a}`);const[w,L,T,,A]=t,[N,E,D]=qy(n),[F,_,B]=qy(s),$=Na(w,F),H=Na(L,_),q=Na(T,B),{padInfo:J,outDepth:re,outHeight:ce,outWidth:ue}=AF(i,d,m,y,N,E,D,$,H,q,c),he=o?A*b:A;let de;return a==="channelsFirst"?de=[h,he,re,ce,ue]:a==="channelsLast"&&(de=[h,re,ce,ue,he]),{batchSize:h,dataFormat:a,inDepth:d,inHeight:m,inWidth:y,inChannels:b,outDepth:re,outHeight:ce,outWidth:ue,outChannels:he,padInfo:J,strideDepth:N,strideHeight:E,strideWidth:D,filterDepth:w,filterHeight:L,filterWidth:T,effectiveFilterDepth:$,effectiveFilterHeight:H,effectiveFilterWidth:q,dilationDepth:F,dilationHeight:_,dilationWidth:B,inShape:e,outShape:de,filterShape:t}}function IF(e,t,n,s,i){s==null&&(s=Hy(e,t,n));const o=e[0],a=e[1],c=yo((o-t+2*s)/n+1,i);k(Ut(c),()=>`The output # of rows (${c}) must be an integer. Change the stride and/or zero pad parameters`);const h=yo((a-t+2*s)/n+1,i);return k(Ut(h),()=>`The output # of columns (${h}) must be an integer. Change the stride and/or zero pad parameters`),[c,h]}function xF(e,t,n,s,i,o){i==null&&(i=Hy(e,t,s));const a=e[0],c=e[1],h=e[2],d=yo((a-t+2*i)/s+1,o);k(Ut(d),()=>`The output # of depths (${d}) must be an integer. Change the stride and/or zero pad parameters`);const m=yo((c-t+2*i)/s+1,o);k(Ut(m),()=>`The output # of rows (${m}) must be an integer. Change the stride and/or zero pad parameters`);const y=yo((h-t+2*i)/s+1,o);return k(Ut(y),()=>`The output # of columns (${y}) must be an integer. 
Change the stride and/or zero pad parameters`),[d,m,y,n]}function Hy(e,t,n,s=1){const i=Na(t,s);return Math.floor((e[0]*(n-1)-n+i)/2)}function Ad(e){return typeof e=="number"?[e,e,e]:e.length===2?[e[0],e[1],1]:e}function qy(e){return typeof e=="number"?[e,e,e]:e}function Na(e,t){return t<=1?e:e+(e-1)*(t-1)}function TF(e,t,n,s,i,o,a,c,h){let d,m,y;if(typeof e=="number"){const b=e===0?"VALID":"NUMBER";d={top:e,bottom:e,left:e,right:e,type:b};const w=IF([t,n],o,s,e,c);m=w[0],y=w[1]}else if(e==="same"){m=Math.ceil(t/s),y=Math.ceil(n/i);const b=Math.max(0,(m-1)*s+o-t),w=Math.max(0,(y-1)*i+a-n),L=Math.floor(b/2),T=b-L,A=Math.floor(w/2),N=w-A;d={top:L,bottom:T,left:A,right:N,type:"SAME"}}else if(e==="valid")d={top:0,bottom:0,left:0,right:0,type:"VALID"},m=Math.ceil((t-o+1)/s),y=Math.ceil((n-a+1)/i);else if(typeof e=="object"){const b=h==="channelsLast"?e[1][0]:e[2][0],w=h==="channelsLast"?e[1][1]:e[2][1],L=h==="channelsLast"?e[2][0]:e[3][0],T=h==="channelsLast"?e[2][1]:e[3][1],A=b===0&&w===0&&L===0&&T===0?"VALID":"EXPLICIT";d={top:b,bottom:w,left:L,right:T,type:A},m=yo((t-o+b+w)/s+1,c),y=yo((n-a+L+T)/i+1,c)}else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:d,outHeight:m,outWidth:y}}function AF(e,t,n,s,i,o,a,c,h,d,m){let y,b,w,L;if(typeof e=="number"){const T=e===0?"VALID":"NUMBER";y={top:e,bottom:e,left:e,right:e,front:e,back:e,type:T};const A=xF([t,n,s,1],c,1,i,e,m);b=A[0],w=A[1],L=A[2]}else if(e==="same"){b=Math.ceil(t/i),w=Math.ceil(n/o),L=Math.ceil(s/a);const T=(b-1)*i+c-t,A=(w-1)*o+h-n,N=(L-1)*a+d-s,E=Math.floor(T/2),D=T-E,F=Math.floor(A/2),_=A-F,B=Math.floor(N/2),$=N-B;y={top:F,bottom:_,left:B,right:$,front:E,back:D,type:"SAME"}}else if(e==="valid")y={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},b=Math.ceil((t-c+1)/i),w=Math.ceil((n-h+1)/o),L=Math.ceil((s-d+1)/a);else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:y,outDepth:b,outHeight:w,outWidth:L}}function yo(e,t){if(!t)return e;switch(t){case"round":return Math.round(e);case"ceil":return Math.ceil(e);case"floor":return Math.floor(e);default:throw new Error(`Unknown roundingMode ${t}`)}}function Rr(e){const[t,n,s]=Ad(e);return t===1&&n===1&&s===1}function rn(e,t){return Rr(e)||Rr(t)}function Yl(e){if(e==="NHWC")return"channelsLast";if(e==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${e}`)}function vF(e,t,n,s,i){const o=W(e,"x","avgPool","float32"),a=1;k(rn(n,a),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`);let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(c.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${c.rank}.`),i!=null&&k(Ut(s),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const T=Fn(c.shape,t,n,1,s,i);return L([c]),T.filterWidth===1&&T.filterHeight===1&&ot(T.inShape,T.outShape)?c.clone():w.avgPool(c,T)},m={x:c},y={filterSize:t,strides:n,pad:s,dimRoundingMode:i};let b=V.runKernelFunc(d,m,null,Xs,y);return b=ve(b,o.dtype),h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const Hl=P({avgPool_:vF});function NF(e,t,n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:nn("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","avgPool3d","float32");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),k(h.rank===5,()=>`Error in avgPool3d: x must be rank 5 but got rank ${h.rank}.`),k(o==="NDHWC",()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),k(rn(n,a),()=>`Error in avgPool3d: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,T)=>{a==null&&(a=[1,1,1]);const A=Gl(h.shape,t,n,a,s,i,o);return T([h]),L.avgPool3d(h,A)},y={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a};let w=V.runKernelFunc(m,y,null,Xc,b);return w=ve(w,h.dtype),d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const jy=P({avgPool3d_:NF});function Ky(e,t){const n=e[0].length;e.forEach((i,o)=>{k(i.length===n,()=>`Error in concat${n}D: rank of tensors[${o}] must be the same as the rank of the rest (${n})`)}),k(t>=0&&t`Error in concat${n}D: axis must be between 0 and ${n-1}.`);const s=e[0];e.forEach((i,o)=>{for(let a=0;a`Error in concat${n}D: Shape of tensors[${o}] (${i}) does not match the shape of the rest (${s}) along the non-concatenated axis ${o}.`)})}function Or(e,t){const n=e[0].slice();for(let s=1;s=1,()=>"Pass at least one tensor to concat");let n=Ul(e,"tensors","concat");n[0].dtype==="complex64"&&n.forEach(a=>{if(a.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor - with dtype ${a.dtype}. 
`)});const s=(a,c)=>{const h=ft(t,n[0].shape)[0],d=Or(n.map(b=>b.shape),h);if(we(d)===0)return en([],d);if(n=n.filter(b=>b.size>0),n.length===1)return n[0];const m=n.map(b=>b.shape);Ky(m,h);const y=a.concat(n,h);return c(n),y},i=n,o={axis:t};return V.runKernelFunc(s,i,null,$u,o)}const Mt=P({concat_:CF});function RF(e){const t=W(e,"x","sigmoid"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sigmoid(t);return i([o]),o},n,null,xl)}const vi=P({sigmoid_:RF});function OF(e,t,n){const s=W(e,"x","slice");if(s.rank===0)throw new Error("Slicing scalar is not possible");const i=(c,h)=>{const[d,m]=wd(s,t,n);return Oy(s,d,m),h([s]),c.slice(s,d,m)},o={x:s},a={begin:t,size:n};return V.runKernelFunc(i,o,null,Ku,a)}const nt=P({slice_:OF});function EF(e){const t=W(e,"x","tanh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.tanh(t);return i([o]),o},n,null,Nl)}const Ca=P({tanh_:EF});function DF(e,t,n,s,i,o){const a=W(e,"forgetBias","basicLSTMCell"),c=W(t,"lstmKernel","basicLSTMCell"),h=W(n,"lstmBias","basicLSTMCell"),d=W(s,"data","basicLSTMCell"),m=W(i,"c","basicLSTMCell"),y=W(o,"h","basicLSTMCell"),b=Mt([d,y],1),w=at(b,c),L=be(w,h),T=L.shape[0],A=L.shape[1]/4,N=[T,A],E=nt(L,[0,0],N),D=nt(L,[0,A],N),F=nt(L,[0,A*2],N),_=nt(L,[0,A*3],N),B=be(X(vi(E),Ca(D)),X(m,vi(be(a,F)))),$=X(Ca(B),vi(_));return[B,$]}const kF=P({basicLSTMCell_:DF});function FF(e,t,n){const s=W(e,"x","batchToSpaceND"),i=t.reduce((h,d)=>h*d);k(s.rank>=1+t.length,()=>`input rank is ${s.rank} but should be > than blockShape.length ${t.length}`),k(n.length===t.length,()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`),k(s.shape[0]%i===0,()=>`input tensor batch is ${s.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${i}`);const o=h=>h.batchToSpaceND(s,t,n),a={x:s},c={blockShape:t,crops:n};return V.runKernelFunc(o,a,null,cg,c)}const ql=P({batchToSpaceND_:FF});function _F(e){let t;return e.rank===0||e.rank===1?t=K(e,[1,1,1,e.size]):e.rank===2?t=K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?t=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):t=e,t}function WF(e,t,n,s,i,o){o==null&&(o=.001);const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;s!=null&&(m=W(s,"offset","batchNorm")),k(c.rank===h.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(m==null||c.rank===m.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(d==null||c.rank===d.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");const y=_F(a),b=(A,N)=>(N([y,c,h,d]),A.batchNorm(y,vd(c),vd(h),vd(m),vd(d),o)),w={x:y,scale:d,offset:m,mean:c,variance:h},L={varianceEpsilon:o},T=V.runKernelFunc(b,w,null,ol,L);return K(T,a.shape)}function vd(e){return e==null?null:e.rank===0?K(e,[e.size]):e.rank===1?e:e.rank===2?K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e}const bo=P({batchNorm_:WF});function $F(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`),k(c.rank===2||c.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${c.rank}.`),k(h.rank===2||h.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank 
${h.rank}.`),d!=null&&k(d.rank===2||d.rank===1,()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${d.rank}.`),m!=null&&k(m.rank===2||m.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${m.rank}.`),bo(a,c,h,m,d,o)}const CT=P({batchNorm2d_:$F});function UF(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===3,()=>`Error in batchNorm3D: x must be rank 3 but got rank ${a.rank}.`),k(c.rank===3||c.rank===1,()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${c.rank}.`),k(h.rank===3||h.rank===1,()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${h.rank}.`),d!=null&&k(d.rank===3||d.rank===1,()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${d.rank}.`),m!=null&&k(m.rank===3||m.rank===1,()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${m.rank}.`),bo(a,c,h,m,d,o)}const RT=P({batchNorm3d_:UF});function BF(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),k(a.rank===4,()=>`Error in batchNorm4D: x must be rank 4 but got rank ${a.rank}.`),k(c.rank===4||c.rank===1,()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${c.rank}.`),k(h.rank===4||h.rank===1,()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${h.rank}.`),d!=null&&k(d.rank===4||d.rank===1,()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${d.rank}.`),m!=null&&k(m.rank===4||m.rank===1,()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${m.rank}.`),bo(a,c,h,m,d,o)}const OT=P({batchNorm4d_:BF});function MF(e,t){let n=W(e,"broadcastTo","x");const s=n.shape;if(t.some(m=>!(m>0)||m%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.lengthn.rank){const m=n.shape.slice();for(;m.length=0;m--)if(i[m]===t[m])o[m]=1;else if(n.shape[m]!==1)throw new Error(`broadcastTo(): [${s}] cannot be broadcast to [${t}].`);const a=o.map((m,y)=>m>1?y:-1).filter(m=>m>=0);if(a.length===0)return Cr(n);const c=m=>m.tile(n,o),h={x:n},d={shape:t,inputShape:i};return V.runKernelFunc(c,h,null,lg,d)}const jl=P({broadcastTo_:MF});function PF(e){const t=W(e,"x","ceil"),n={x:t};return V.runKernelFunc(s=>s.ceil(t),n,null,Zc)}const Xy=P({ceil_:PF});function zF(e,t,n){const s=W(e,"x","clipByValue");k(t<=n,()=>`Error in clip: min (${t}) must be less than or equal to max (${n}).`);const i={x:s},o={clipValueMin:t,clipValueMax:n};return V.runKernelFunc((a,c)=>{const h=a.clip(s,t,n);return c([s]),h},i,null,Qc,o)}const Yn=P({clipByValue_:zF});function GF(e){return Mt(e,0)}const ET=P({concat1d_:GF});function VF(e,t){return Mt(e,t)}const DT=P({concat2d_:VF});function YF(e,t){return Mt(e,t)}const kT=P({concat3d_:YF});function HF(e,t){return Mt(e,t)}const FT=P({concat4d_:HF});function qF(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","conv2d"),h=W(t,"filter","conv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),k(d.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${d.rank}.`),k(h.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${h.rank}.`),a!=null&&k(Ut(s),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const 
y=i==="NHWC"?d.shape[3]:d.shape[1];k(y===h.shape[2],()=>`Error in conv2d: depth of input (${y}) must match input depth for filter ${h.shape[2]}.`),k(rn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const b=(A,N)=>{const E=Yl(i),D=Ai(d.shape,h.shape,n,o,s,a,!1,E),F=A.conv2d(d,h,D);return N([d,h]),F},w={x:d,filter:h},L={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},T=V.runKernelFunc(b,w,null,ug,L);return m?K(T,[T.shape[1],T.shape[2],T.shape[3]]):T}const ji=P({conv2d_:qF});function jF(e,t,n,s,i="NWC",o=1,a){const c=W(e,"x","conv1d"),h=W(t,"filter","conv1d");let d=c,m=!1;c.rank===2&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1]])),k(d.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${d.rank}.`),k(h.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${h.rank}.`),a!=null&&k(Ut(s),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),k(d.shape[2]===h.shape[1],()=>`Error in conv1d: depth of input (${d.shape[2]}) must match input depth for filter ${h.shape[1]}.`),k(rn(n,o),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${n} and dilation '${o}'`),k(i==="NWC",()=>`Error in conv1d: got dataFormat of ${i} but only NWC is currently supported.`);const y=K(h,[1,h.shape[0],h.shape[1],h.shape[2]]),b=K(d,[d.shape[0],1,d.shape[1],d.shape[2]]),w=[1,n],L=[1,o],T="NHWC",A=ji(b,y,w,s,T,L,a);return m?K(A,[A.shape[2],A.shape[3]]):K(A,[A.shape[0],A.shape[2],A.shape[3]])}const Nd=P({conv1d_:jF});function KF(e,t,n,s,i,o="NHWC",a){k(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let c=e,h=t,d=!1;t.rank===3&&(d=!0,h=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]),c=[1,e[0],e[1],e[2]]),k(c.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${c.length}.`),k(h.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${h.rank}`),k(n.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${n.rank}`);const m=o==="NHWC"?c[3]:c[1],y=o==="NHWC"?h.shape[3]:h.shape[1];k(m===n.shape[2],()=>`Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${n.shape[2]}.`),k(y===n.shape[3],()=>`Error in conv2dDerInput: depth of output (${y}) must match output depth for filter ${n.shape[3]}.`),a!=null&&k(Ut(i),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const b=(A,N)=>{const E=1,D=Yl(o),F=Ai(c,n.shape,s,E,i,a,!1,D),_=A.conv2dDerInput(h,n,F);return N([h,n]),_},w={dy:h,filter:n},L={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,inputShape:c},T=V.runKernelFunc(b,w,null,dg,L);return d?K(T,[T.shape[1],T.shape[2],T.shape[3]]):T}const Jy=P({conv2DBackpropInput_:KF});function XF(e,t,n,s,i,o){const a=W(e,"x","conv2dTranspose"),c=W(t,"filter","conv2dTranspose");return Jy(n,a,c,s,i,"NHWC",o)}const Cd=P({conv2dTranspose_:XF});function JF(e,t,n,s,i="NDHWC",o=[1,1,1]){const a=W(e,"x","conv3d"),c=W(t,"filter","conv3d");let h=a,d=!1;a.rank===4&&(d=!0,h=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]])),k(h.rank===5,()=>`Error in conv3d: input must be rank 5, but got rank ${h.rank}.`),k(c.rank===5,()=>`Error in conv3d: filter must be rank 5, but got rank ${c.rank}.`),k(h.shape[4]===c.shape[3],()=>`Error in conv3d: depth of input (${h.shape[4]}) must match input depth for filter ${c.shape[3]}.`),k(rn(n,o),()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${o}'`),k(i==="NDHWC",()=>`Error in conv3d: got dataFormat of ${i} but only NDHWC is currently supported.`);const m=(L,T)=>{const A=Vl(h.shape,c.shape,n,o,s),N=L.conv3d(h,c,A);return T([h,c]),N},y={x:h,filter:c},b={strides:n,pad:s,dataFormat:i,dilations:o},w=V.runKernelFunc(m,y,null,pg,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Zy=P({conv3d_:JF});function ZF(e,t,n,s,i){k(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let o=e,a=t,c=!1;t.rank===4&&(c=!0,a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),o=[1,e[0],e[1],e[2],e[3]]);const h=o[4],d=a.shape[4];k(o.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${o.length}.`),k(a.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${a.rank}`),k(n.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${n.rank}`),k(h===n.shape[3],()=>`Error in conv3dDerInput: depth of input (${h}) must match input depth for filter ${n.shape[3]}.`),k(d===n.shape[4],()=>`Error in conv3dDerInput: depth of output (${d}) must match output depth for filter ${n.shape[4]}.`);const m=L=>{const T=1,A=Vl(o,n.shape,s,T,i);return L.conv3dDerInput(a,n,A)},y={dy:a},b={pad:i},w=V.runKernelFunc(m,y,null,sx,b);return c?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const _T=P({conv3DBackpropInput_:ZF});function QF(e,t,n,s,i){const o=W(e,"x","conv3dTranspose"),a=W(t,"filter","conv3dTranspose");return _T(n,o,a,s,i)}const e_=P({conv3dTranspose_:QF});function t_(e){const t=W(e,"x","cos"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.cos(t);return i([t]),o},n,null,da)}const Kl=P({cos_:t_});function n_(e){const t=W(e,"x","cosh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.cosh(t);return i([t]),o},n,null,el)}const Rd=P({cosh_:n_});function s_(e,t=0,n=!1,s=!1){const i=W(e,"x","cumsum"),o=(h,d)=>{const m=kn([t],i.rank);let y=i;m!=null&&(y=Me(i,m));const b=ws(1,i.rank)[0];let w=h.cumsum(y,b,n,s);if(d([i]),m!=null){const L=Ml(m);w=Me(w,L)}return w},a={x:i},c={axis:t,exclusive:n,reverse:s};return V.runKernelFunc(o,a,null,mg,c)}const Od=P({cumsum_:s_});function i_(e,t,n="NHWC"){const s=W(e,"x","depthToSpace"),i=n==="NHWC"?s.shape[1]:s.shape[2],o=n==="NHWC"?s.shape[2]:s.shape[3],a=n==="NHWC"?s.shape[3]:s.shape[1];k(i*t>=0,()=>`Negative dimension size caused by overflow when multiplying +Expected: ${o}.`)}}function SF(e,t){e().then(()=>t.fail(),()=>t())}function IF(e,t){const n=typeof t=="string"||typeof t=="number"||typeof t=="boolean"?[t]:t;return Yi(e)||Yi(e[0])||Yi(t)||Yi(t[0])?nb(e,n,(s,i)=>s==i):nb(e,t,(s,i)=>ib(s,i,0))}function sb(e,t,n){if(n==null&&(n=Xd()),!ib(e,t,n))throw new Error(`Numbers differ: actual === ${e}, expected === ${t}`)}function ib(e,t,n){return!isFinite(e)&&!isFinite(t)?!0:!(isNaN(e)||isNaN(t)||Math.abs(e-t)>n)}function xF(e,t,n){for(let s=0;sn)throw new Error(`Value out of range:${e[s]} low: ${t}, high: ${n}`)}function TF(e,t){expect(new Float32Array(e)).toEqual(new Float32Array(t))}var AF=Object.freeze({__proto__:null,TEST_EPSILON_FLOAT16:PT,expectArraysClose:LF,testEpsilon:Xd,expectPromiseToFail:SF,expectArraysEqual:IF,expectNumbersClose:sb,expectValuesInRange:xF,expectArrayBuffersEqual:TF});const zT="2.7.0";function vF(){oe().set("PROD",!0)}function NF(){oe().set("DEBUG",!0)}function CF(){oe().set("DEPRECATION_WARNINGS_ENABLED",!1),console.warn("TensorFlow.js deprecation warnings have been disabled.")}function 
un(e){oe().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(e+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}ck(un);function RF(){G.disposeVariables()}function Ki(){return G}function Jd(){return G.memory()}function OF(e){return G.profile(e)}function Q(e,t){return G.tidy(e,t)}function He(e){const t=Hi(e);t.forEach(n=>n.dispose())}function bn(e){return G.keep(e)}function EF(e){return G.time(e)}function VT(e){return G.setBackend(e)}function DF(){return G.ready()}function kF(){return G.backendName}function FF(e){G.removeBackend(e)}function _F(e){return G.findBackend(e)}function WF(e){return G.findBackendFactory(e)}function rb(e,t,n=1){return G.registerBackend(e,t,n)}function GT(){return G.backend}function $F(e,t){oe().setPlatform(e,t)}function UF(e,t){let n=W(e,"a","add"),s=W(t,"b","add");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.add(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,wo)}const be=z({add_:UF});function BF(e,t){let n=W(e,"a","floorDiv"),s=W(t,"b","floorDiv");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.floorDiv(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,sy)}const Zd=z({floorDiv_:BF});function MF(e,t){let n=W(e,"a","div"),s=W(t,"b","div");if([n,s]=Gt(n,s),n.dtype==="int32"&&s.dtype==="int32")return Zd(n,s);const i=(c,h)=>{const d=c.realDivide(n,s);return h([n,s]),d},o={a:n,b:s},a={};return G.runKernelFunc(i,o,null,xa,a)}const We=z({div_:MF});function PF(e,t){let n=W(e,"a","mul"),s=W(t,"b","mul");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.multiply(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Ta)}const X=z({mul_:PF});function zF(e){const t=W(e,"x","abs"),n={x:t};return G.runKernelFunc((s,i)=>(i([t]),t.dtype==="complex64"?s.complexAbs(t):s.abs(t)),n,null,td)}const dn=z({abs_:zF});function VF(e){const t=W(e,"x","acos"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.acos(t);return i([t]),o},n,null,ol)}const ob=z({acos_:VF});function GF(e){const t=W(e,"x","acosh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.acosh(t);return i([t]),o},n,null,al)}const ab=z({acosh_:GF});function YF(e){A(Array.isArray(e),()=>"The argument passed to tf.addN() must be a list of tensors"),A(e.length>=1,()=>`Must pass at least one tensor to tf.addN(), but got ${e.length}`);const t=e.map((o,a)=>W(o,`tensors${a}`,"addN")),n=t[0];t.forEach(o=>{if(o.dtype!==n.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")}),t.forEach(o=>{if(!ae(o.shape,n.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")});const s=(o,a)=>{const c=o.addN(t);return a(t),c},i=t;return G.runKernelFunc(s,i,null,Gg)}const YT=z({addN_:YF});function cb(e,t){for(let n=0;ne[o]);return[n,i]}function vn(e,t){const n=t.map(s=>1);return HT(e,n,t)}function Kn(e,t,n){A(cb(t,n),()=>`${e} supports only inner-most axes for now. 
Got axes ${t} and rank-${n} input.`)}function Xn(e,t){if(cb(e,t))return null;const n=[];for(let s=0;sn.push(s)),n}function sh(e){return e.map((t,n)=>[n,t]).sort((t,n)=>t[1]-n[1]).map(t=>t[0])}function as(e,t){const n=[];for(let s=t-e;s{const h=qe(t,s.shape);let d=h;const m=Xn(d,s.rank);m!=null&&(s=Ye(s,m),d=as(d.length,s.rank));const f=c.all(s,d);if(n){const b=vn(f.shape,h);return K(f,b)}return f},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Rx,a)}const Qd=z({all_:HF});function qF(e,t=null,n=!1){let s=W(e,"x","any","bool");const i=c=>{const h=qe(t,s.shape);let d=h;const m=Xn(d,s.rank);m!=null&&(s=Ye(s,m),d=as(d.length,s.rank));const f=c.any(s,d);if(n){const b=vn(f.shape,h);return K(f,b)}return f},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Ox,a)}const ih=z({any_:qF});function jF(e,t=0){let n=W(e,"x","argMax");const s=(a,c)=>{c([n]);let h=qe(t,n.shape);const d=Xn(h,n.rank);return d!=null&&(n=Ye(n,d),h=as(h.length,n.rank)),a.argMax(n,h[0])},i={x:n},o={axis:t};return G.runKernelFunc(s,i,null,Yg,o)}const rh=z({argMax_:jF});function KF(e,t=0){let n=W(e,"x","argMin");const s=(a,c)=>{c([n]),t==null&&(t=0);let h=qe(t,n.shape);const d=Xn(h,n.rank);return d!=null&&(n=Ye(n,d),h=as(h.length,n.rank)),a.argMin(n,h[0])},i={x:n},o={axis:t};return G.runKernelFunc(s,i,null,Hg,o)}const lb=z({argMin_:KF});function XF(e){const t=W(e,"x","asin"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.asin(t);return i([t]),o},n,null,cl)}const hb=z({asin_:XF});function JF(e){const t=W(e,"x","asinh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.asinh(t);return i([t]),o},n,null,ll)}const ub=z({asinh_:JF});function ZF(e){const t=W(e,"x","atan"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.atan(t);return i([t]),o},n,null,hl)}const db=z({atan_:ZF});function QF(e,t){let n=W(e,"a","atan2"),s=W(t,"b","atan2");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.atan2(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,nd)}const pb=z({atan2_:QF});function e_(e){const t=W(e,"x","atanh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.atanh(t);return i([t]),o},n,null,ul)}const mb=z({atanh_:e_});function ep(e,t,n,s,i="NHWC",o){const a=e[3],c=[...t,a],h=Wr(i);return kn(e,c,n,o,s,null,null,h)}function Un(e,t,n,s,i,o,a="channelsLast"){const[c,h]=tp(t);let d;if(a==="channelsLast")d=[c,h,e[3],e[3]];else if(a==="channelsFirst")d=[c,h,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return kn(e,d,n,s,i,o,!1,a)}function oh(e,t,n,s,i,o,a="NDHWC"){const[c,h,d]=gb(t);let m,f;if(a==="NDHWC")f="channelsLast",m=[c,h,d,e[4],e[4]];else if(a==="NCDHW")f="channelsFirst",m=[c,h,d,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return Fr(e,m,n,s,i,!1,f,o)}function kn(e,t,n,s,i,o,a=!1,c="channelsLast"){let[h,d,m,f]=[-1,-1,-1,-1];if(c==="channelsLast")[h,d,m,f]=e;else if(c==="channelsFirst")[h,f,d,m]=e;else throw new Error(`Unknown dataFormat ${c}`);const[b,w,,L]=t,[x,v]=tp(n),[N,O]=tp(s),E=Wa(b,N),k=Wa(w,O),{padInfo:F,outHeight:U,outWidth:$}=s_(i,d,m,x,v,E,k,o,c),Y=a?L*f:L;let j;return c==="channelsFirst"?j=[h,Y,U,$]:c==="channelsLast"&&(j=[h,U,$,Y]),{batchSize:h,dataFormat:c,inHeight:d,inWidth:m,inChannels:f,outHeight:U,outWidth:$,outChannels:Y,padInfo:F,strideHeight:x,strideWidth:v,filterHeight:b,filterWidth:w,effectiveFilterHeight:E,effectiveFilterWidth:k,dilationHeight:N,dilationWidth:O,inShape:e,outShape:j,filterShape:t}}function Fr(e,t,n,s,i,o=!1,a="channelsLast",c){let[h,d,m,f,b]=[-1,-1,-1,-1,-1];if(a==="channelsLast")[h,d,m,f,b]=e;else if(a==="channelsFirst")[h,b,d,m,f]=e;else 
throw new Error(`Unknown dataFormat ${a}`);const[w,L,x,,v]=t,[N,O,E]=gb(n),[k,F,U]=gb(s),$=Wa(w,k),Y=Wa(L,F),j=Wa(x,U),{padInfo:Z,outDepth:ie,outHeight:de,outWidth:he}=i_(i,d,m,f,N,O,E,$,Y,j,c),ue=o?v*b:v;let me;return a==="channelsFirst"?me=[h,ue,ie,de,he]:a==="channelsLast"&&(me=[h,ie,de,he,ue]),{batchSize:h,dataFormat:a,inDepth:d,inHeight:m,inWidth:f,inChannels:b,outDepth:ie,outHeight:de,outWidth:he,outChannels:ue,padInfo:Z,strideDepth:N,strideHeight:O,strideWidth:E,filterDepth:w,filterHeight:L,filterWidth:x,effectiveFilterDepth:$,effectiveFilterHeight:Y,effectiveFilterWidth:j,dilationDepth:k,dilationHeight:F,dilationWidth:U,inShape:e,outShape:me,filterShape:t}}function t_(e,t,n,s,i){s==null&&(s=fb(e,t,n));const o=e[0],a=e[1],c=vo((o-t+2*s)/n+1,i);A(Le(c),()=>`The output # of rows (${c}) must be an integer. Change the stride and/or zero pad parameters`);const h=vo((a-t+2*s)/n+1,i);return A(Le(h),()=>`The output # of columns (${h}) must be an integer. Change the stride and/or zero pad parameters`),[c,h]}function n_(e,t,n,s,i,o){i==null&&(i=fb(e,t,s));const a=e[0],c=e[1],h=e[2],d=vo((a-t+2*i)/s+1,o);A(Le(d),()=>`The output # of depths (${d}) must be an integer. Change the stride and/or zero pad parameters`);const m=vo((c-t+2*i)/s+1,o);A(Le(m),()=>`The output # of rows (${m}) must be an integer. Change the stride and/or zero pad parameters`);const f=vo((h-t+2*i)/s+1,o);return A(Le(f),()=>`The output # of columns (${f}) must be an integer. Change the stride and/or zero pad parameters`),[d,m,f,n]}function fb(e,t,n,s=1){const i=Wa(t,s);return Math.floor((e[0]*(n-1)-n+i)/2)}function tp(e){return typeof e=="number"?[e,e,e]:e.length===2?[e[0],e[1],1]:e}function gb(e){return typeof e=="number"?[e,e,e]:e}function Wa(e,t){return t<=1?e:e+(e-1)*(t-1)}function s_(e,t,n,s,i,o,a,c,h){let d,m,f;if(typeof e=="number"){const b=e===0?"VALID":"NUMBER";d={top:e,bottom:e,left:e,right:e,type:b};const w=t_([t,n],o,s,e,c);m=w[0],f=w[1]}else if(e==="same"){m=Math.ceil(t/s),f=Math.ceil(n/i);const b=Math.max(0,(m-1)*s+o-t),w=Math.max(0,(f-1)*i+a-n),L=Math.floor(b/2),x=b-L,v=Math.floor(w/2),N=w-v;d={top:L,bottom:x,left:v,right:N,type:"SAME"}}else if(e==="valid")d={top:0,bottom:0,left:0,right:0,type:"VALID"},m=Math.ceil((t-o+1)/s),f=Math.ceil((n-a+1)/i);else if(typeof e=="object"){const b=h==="channelsLast"?e[1][0]:e[2][0],w=h==="channelsLast"?e[1][1]:e[2][1],L=h==="channelsLast"?e[2][0]:e[3][0],x=h==="channelsLast"?e[2][1]:e[3][1],v=b===0&&w===0&&L===0&&x===0?"VALID":"EXPLICIT";d={top:b,bottom:w,left:L,right:x,type:v},m=vo((t-o+b+w)/s+1,c),f=vo((n-a+L+x)/i+1,c)}else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:d,outHeight:m,outWidth:f}}function i_(e,t,n,s,i,o,a,c,h,d,m){let f,b,w,L;if(typeof e=="number"){const x=e===0?"VALID":"NUMBER";f={top:e,bottom:e,left:e,right:e,front:e,back:e,type:x};const v=n_([t,n,s,1],c,1,i,e,m);b=v[0],w=v[1],L=v[2]}else if(e==="same"){b=Math.ceil(t/i),w=Math.ceil(n/o),L=Math.ceil(s/a);const x=(b-1)*i+c-t,v=(w-1)*o+h-n,N=(L-1)*a+d-s,O=Math.floor(x/2),E=x-O,k=Math.floor(v/2),F=v-k,U=Math.floor(N/2),$=N-U;f={top:k,bottom:F,left:U,right:$,front:O,back:E,type:"SAME"}}else if(e==="valid")f={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},b=Math.ceil((t-c+1)/i),w=Math.ceil((n-h+1)/o),L=Math.ceil((s-d+1)/a);else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:f,outDepth:b,outHeight:w,outWidth:L}}function vo(e,t){if(!t)return e;switch(t){case"round":return Math.round(e);case"ceil":return Math.ceil(e);case"floor":return Math.floor(e);default:throw new 
Error(`Unknown roundingMode ${t}`)}}function _r(e){const[t,n,s]=tp(e);return t===1&&n===1&&s===1}function cn(e,t){return _r(e)||_r(t)}function Wr(e){if(e==="NHWC")return"channelsLast";if(e==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${e}`)}function r_(e,t,n,s,i){const o=W(e,"x","avgPool","float32"),a=1;A(cn(n,a),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`);let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(c.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${c.rank}.`),i!=null&&A(Le(s),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const x=Un(c.shape,t,n,1,s,i);return L([c]),x.filterWidth===1&&x.filterHeight===1&&ae(x.inShape,x.outShape)?c.clone():w.avgPool(c,x)},m={x:c},f={filterSize:t,strides:n,pad:s,dimRoundingMode:i};let b=G.runKernelFunc(d,m,null,dl,f);return b=Ae(b,o.dtype),h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const ah=z({avgPool_:r_});function o_(e,t,n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:un("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","avgPool3d","float32");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),A(h.rank===5,()=>`Error in avgPool3d: x must be rank 5 but got rank ${h.rank}.`),A(o==="NDHWC",()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),A(cn(n,a),()=>`Error in avgPool3d: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,x)=>{a==null&&(a=[1,1,1]);const v=oh(h.shape,t,n,a,s,i,o);return x([h]),L.avgPool3d(h,v)},f={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a};let w=G.runKernelFunc(m,f,null,qg,b);return w=Ae(w,h.dtype),d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const yb=z({avgPool3d_:o_});function np(e,t){const n=e[0].length;e.forEach((i,o)=>{A(i.length===n,()=>`Error in concat${n}D: rank of tensors[${o}] must be the same as the rank of the rest (${n})`)}),A(t>=0&&t`Error in concat${n}D: axis must be between 0 and ${n-1}.`);const s=e[0];e.forEach((i,o)=>{for(let a=0;a`Error in concat${n}D: Shape of tensors[${o}] (${i}) does not match the shape of the rest (${s}) along the non-concatenated axis ${o}.`)})}function Xi(e,t){const n=e[0].slice();for(let s=1;s=1,()=>"Pass at least one tensor to concat");let n=th(e,"tensors","concat");n[0].dtype==="complex64"&&n.forEach(a=>{if(a.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${a.dtype}. 
`)});const s=(a,c)=>{const h=qe(t,n[0].shape)[0],d=Xi(n.map(b=>b.shape),h);if(P(d)===0)return sn([],d);if(n=n.filter(b=>b.size>0),n.length===1)return n[0];const m=n.map(b=>b.shape);np(m,h);const f=a.concat(n,h);return c(n),f},i=n,o={axis:t};return G.runKernelFunc(s,i,null,fl,o)}const Yt=z({concat_:a_});function c_(e){const t=W(e,"x","sigmoid"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sigmoid(t);return i([o]),o},n,null,zl)}const Ti=z({sigmoid_:c_});function l_(e,t,n){const s=W(e,"x","slice");if(s.rank===0)throw new Error("Slicing scalar is not possible");const i=(c,h)=>{const[d,m]=Kd(s,t,n);return Qy(s,d,m),h([s]),c.slice(s,d,m)},o={x:s},a={begin:t,size:n};return G.runKernelFunc(i,o,null,Ad,a)}const tt=z({slice_:l_});function h_(e){const t=W(e,"x","tanh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.tanh(t);return i([o]),o},n,null,Yl)}const $a=z({tanh_:h_});function u_(e,t,n,s,i,o){const a=W(e,"forgetBias","basicLSTMCell"),c=W(t,"lstmKernel","basicLSTMCell"),h=W(n,"lstmBias","basicLSTMCell"),d=W(s,"data","basicLSTMCell"),m=W(i,"c","basicLSTMCell"),f=W(o,"h","basicLSTMCell"),b=Yt([d,f],1),w=ct(b,c),L=be(w,h),x=L.shape[0],v=L.shape[1]/4,N=[x,v],O=tt(L,[0,0],N),E=tt(L,[0,v],N),k=tt(L,[0,v*2],N),F=tt(L,[0,v*3],N),U=be(X(Ti(O),$a(E)),X(m,Ti(be(a,k)))),$=X($a(U),Ti(F));return[U,$]}const d_=z({basicLSTMCell_:u_});function p_(e,t,n){const s=W(e,"x","batchToSpaceND"),i=t.reduce((h,d)=>h*d);A(s.rank>=1+t.length,()=>`input rank is ${s.rank} but should be > than blockShape.length ${t.length}`),A(n.length===t.length,()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`),A(s.shape[0]%i===0,()=>`input tensor batch is ${s.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${i}`);const o=h=>h.batchToSpaceND(s,t,n),a={x:s},c={blockShape:t,crops:n};return G.runKernelFunc(o,a,null,jg,c)}const ch=z({batchToSpaceND_:p_});function m_(e){let t;return e.rank===0||e.rank===1?t=K(e,[1,1,1,e.size]):e.rank===2?t=K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?t=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):t=e,t}function f_(e,t,n,s,i,o){o==null&&(o=.001);const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;s!=null&&(m=W(s,"offset","batchNorm")),A(c.rank===h.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(m==null||c.rank===m.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(d==null||c.rank===d.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");const f=m_(a),b=(v,N)=>(N([f,c,h,d]),v.batchNorm(f,sp(c),sp(h),sp(m),sp(d),o)),w={x:f,scale:d,offset:m,mean:c,variance:h},L={varianceEpsilon:o},x=G.runKernelFunc(b,w,null,Il,L);return K(x,a.shape)}function sp(e){return e==null?null:e.rank===0?K(e,[e.size]):e.rank===1?e:e.rank===2?K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e}const No=z({batchNorm_:f_});function g_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`),A(c.rank===2||c.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${c.rank}.`),A(h.rank===2||h.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank 
${h.rank}.`),d!=null&&A(d.rank===2||d.rank===1,()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===2||m.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const qT=z({batchNorm2d_:g_});function y_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===3,()=>`Error in batchNorm3D: x must be rank 3 but got rank ${a.rank}.`),A(c.rank===3||c.rank===1,()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${c.rank}.`),A(h.rank===3||h.rank===1,()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${h.rank}.`),d!=null&&A(d.rank===3||d.rank===1,()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===3||m.rank===1,()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const jT=z({batchNorm3d_:y_});function b_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===4,()=>`Error in batchNorm4D: x must be rank 4 but got rank ${a.rank}.`),A(c.rank===4||c.rank===1,()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${c.rank}.`),A(h.rank===4||h.rank===1,()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${h.rank}.`),d!=null&&A(d.rank===4||d.rank===1,()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===4||m.rank===1,()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const KT=z({batchNorm4d_:b_});function w_(e,t){let n=W(e,"broadcastTo","x");const s=n.shape;if(t.some(m=>!(m>0)||m%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.lengthn.rank){const m=n.shape.slice();for(;m.length=0;m--)if(i[m]===t[m])o[m]=1;else if(n.shape[m]!==1)throw new Error(`broadcastTo(): [${s}] cannot be broadcast to [${t}].`);const a=o.map((m,f)=>m>1?f:-1).filter(m=>m>=0);if(a.length===0)return kr(n);const c=m=>m.tile(n,o),h={x:n},d={shape:t,inputShape:i};return G.runKernelFunc(c,h,null,Kg,d)}const lh=z({broadcastTo_:w_});function L_(e){const t=W(e,"x","ceil"),n={x:t};return G.runKernelFunc(s=>s.ceil(t),n,null,pl)}const bb=z({ceil_:L_});function S_(e,t,n){const s=W(e,"x","clipByValue");A(t<=n,()=>`Error in clip: min (${t}) must be less than or equal to max (${n}).`);const i={x:s},o={clipValueMin:t,clipValueMax:n};return G.runKernelFunc((a,c)=>{const h=a.clip(s,t,n);return c([s]),h},i,null,ml,o)}const Jn=z({clipByValue_:S_});function I_(e){return Yt(e,0)}const XT=z({concat1d_:I_});function x_(e,t){return Yt(e,t)}const JT=z({concat2d_:x_});function T_(e,t){return Yt(e,t)}const ZT=z({concat3d_:T_});function A_(e,t){return Yt(e,t)}const QT=z({concat4d_:A_});function v_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","conv2d"),h=W(t,"filter","conv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),A(d.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${d.rank}.`),A(h.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${h.rank}.`),a!=null&&A(Le(s),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const 
f=i==="NHWC"?d.shape[3]:d.shape[1];A(f===h.shape[2],()=>`Error in conv2d: depth of input (${f}) must match input depth for filter ${h.shape[2]}.`),A(cn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const b=(v,N)=>{const O=Wr(i),E=kn(d.shape,h.shape,n,o,s,a,!1,O),k=v.conv2d(d,h,E);return N([d,h]),k},w={x:d,filter:h},L={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},x=G.runKernelFunc(b,w,null,od,L);return m?K(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const Ji=z({conv2d_:v_});function N_(e,t,n,s,i="NWC",o=1,a){const c=W(e,"x","conv1d"),h=W(t,"filter","conv1d");let d=c,m=!1;c.rank===2&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1]])),A(d.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${d.rank}.`),A(h.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${h.rank}.`),a!=null&&A(Le(s),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),A(d.shape[2]===h.shape[1],()=>`Error in conv1d: depth of input (${d.shape[2]}) must match input depth for filter ${h.shape[1]}.`),A(cn(n,o),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${n} and dilation '${o}'`),A(i==="NWC",()=>`Error in conv1d: got dataFormat of ${i} but only NWC is currently supported.`);const f=K(h,[1,h.shape[0],h.shape[1],h.shape[2]]),b=K(d,[d.shape[0],1,d.shape[1],d.shape[2]]),w=[1,n],L=[1,o],x="NHWC",v=Ji(b,f,w,s,x,L,a);return m?K(v,[v.shape[2],v.shape[3]]):K(v,[v.shape[0],v.shape[2],v.shape[3]])}const ip=z({conv1d_:N_});function C_(e,t,n,s,i,o="NHWC",a){A(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let c=e,h=t,d=!1;t.rank===3&&(d=!0,h=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]),c=[1,e[0],e[1],e[2]]),A(c.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${c.length}.`),A(h.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${h.rank}`),A(n.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${n.rank}`);const m=o==="NHWC"?c[3]:c[1],f=o==="NHWC"?h.shape[3]:h.shape[1];A(m===n.shape[2],()=>`Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${n.shape[2]}.`),A(f===n.shape[3],()=>`Error in conv2dDerInput: depth of output (${f}) must match output depth for filter ${n.shape[3]}.`),a!=null&&A(Le(i),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const b=(v,N)=>{const O=1,E=Wr(o),k=kn(c,n.shape,s,O,i,a,!1,E),F=v.conv2dDerInput(h,n,k);return N([h,n]),F},w={dy:h,filter:n},L={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,inputShape:c},x=G.runKernelFunc(b,w,null,ad,L);return d?K(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const wb=z({conv2DBackpropInput_:C_});function R_(e,t,n,s,i,o){const a=W(e,"x","conv2dTranspose"),c=W(t,"filter","conv2dTranspose");return wb(n,a,c,s,i,"NHWC",o)}const rp=z({conv2dTranspose_:R_});function O_(e,t,n,s,i="NDHWC",o=[1,1,1]){const a=W(e,"x","conv3d"),c=W(t,"filter","conv3d");let h=a,d=!1;a.rank===4&&(d=!0,h=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]])),A(h.rank===5,()=>`Error in conv3d: input must be rank 5, but got rank ${h.rank}.`),A(c.rank===5,()=>`Error in conv3d: filter must be rank 5, but got rank ${c.rank}.`),A(h.shape[4]===c.shape[3],()=>`Error in conv3d: depth of input (${h.shape[4]}) must match input depth for filter ${c.shape[3]}.`),A(cn(n,o),()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${o}'`),A(i==="NDHWC",()=>`Error in conv3d: got dataFormat of ${i} but only NDHWC is currently supported.`);const m=(L,x)=>{const v=Fr(h.shape,c.shape,n,o,s),N=L.conv3d(h,c,v);return x([h,c]),N},f={x:h,filter:c},b={strides:n,pad:s,dataFormat:i,dilations:o},w=G.runKernelFunc(m,f,null,cd,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Lb=z({conv3d_:O_});function E_(e,t,n,s,i){A(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let o=e,a=t,c=!1;t.rank===4&&(c=!0,a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),o=[1,e[0],e[1],e[2],e[3]]);const h=o[4],d=a.shape[4];A(o.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${o.length}.`),A(a.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${a.rank}`),A(n.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${n.rank}`),A(h===n.shape[3],()=>`Error in conv3dDerInput: depth of input (${h}) must match input depth for filter ${n.shape[3]}.`),A(d===n.shape[4],()=>`Error in conv3dDerInput: depth of output (${d}) must match output depth for filter ${n.shape[4]}.`);const m=L=>{const x=1,v=Fr(o,n.shape,s,x,i);return L.conv3dDerInput(a,n,v)},f={dy:a,filter:n},b={pad:i,strides:s,inputShape:o},w=G.runKernelFunc(m,f,null,Zg,b);return c?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const eA=z({conv3DBackpropInput_:E_});function D_(e,t,n,s,i){const o=W(e,"x","conv3dTranspose"),a=W(t,"filter","conv3dTranspose");return eA(n,o,a,s,i)}const k_=z({conv3dTranspose_:D_});function F_(e){const t=W(e,"x","cos"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.cos(t);return i([t]),o},n,null,Ia)}const hh=z({cos_:F_});function __(e){const t=W(e,"x","cosh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.cosh(t);return i([t]),o},n,null,gl)}const op=z({cosh_:__});function W_(e,t=0,n=!1,s=!1){const i=W(e,"x","cumsum"),o=(h,d)=>{const m=Xn([t],i.rank);let f=i;m!=null&&(f=Ye(i,m));const b=as(1,i.rank)[0];let w=h.cumsum(f,b,n,s);if(d([i]),m!=null){const L=sh(m);w=Ye(w,L)}return w},a={x:i},c={axis:t,exclusive:n,reverse:s};return G.runKernelFunc(o,a,null,Qg,c)}const ap=z({cumsum_:W_});function $_(e,t,n="NHWC"){const s=W(e,"x","depthToSpace"),i=n==="NHWC"?s.shape[1]:s.shape[2],o=n==="NHWC"?s.shape[2]:s.shape[3],a=n==="NHWC"?s.shape[3]:s.shape[1];A(i*t>=0,()=>`Negative dimension size caused by overflow when multiplying ${i} and ${t} for depthToSpace with input shape - ${s.shape}`),k(o*t>=0,()=>`Negative dimension size caused by overflow when multiplying + ${s.shape}`),A(o*t>=0,()=>`Negative dimension size caused by overflow when multiplying ${o} and ${t} for depthToSpace with input shape - ${s.shape}`),k(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${s.shape}`);const c=m=>m.depthToSpace(s,t,n),h={x:s},d={blockSize:t,dataFormat:n};return V.runKernelFunc(c,h,null,rx,d)}const Qy=P({depthToSpace_:i_});function r_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","depthwiseConv2d"),h=W(t,"filter","depthwiseConv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),k(d.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${d.rank}.`),k(h.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${h.rank}.`),k(d.shape[3]===h.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${d.shape[3]}) must match the inChannels dimension in filter 
${h.shape[2]}.`),a!=null&&k(Ut(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const y=(T,A)=>{o==null&&(o=[1,1]),k(rn(n,o),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const N=Ai(d.shape,h.shape,n,o,s,a,!0),E=T.depthwiseConv2D(d,h,N);return A([d,h]),E},b={x:d,filter:h},w={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},L=V.runKernelFunc(y,b,null,fg,w);return m?K(L,[L.shape[1],L.shape[2],L.shape[3]]):L}const wo=P({depthwiseConv2d_:r_});function o_(e){const t=W(e,"x","diag"),n=i=>{const o=K(t,[t.size]),a=i.diag(o),c=[...e.shape,...e.shape];return K(a,c)},s={x:t};return V.runKernelFunc(n,s,null,cx)}const a_=P({diag_:o_});function c_(e,t,n,s,i=[1,1],o="NHWC"){const a=W(e,"x","dilation2d"),c=W(t,"filter","dilation2d");k(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),k(c.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${c.rank}.`),k(o==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${o}`);let h=a,d=!1;a.rank===3&&(h=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),d=!0);const m={x:h,filter:c},y={strides:n,pad:s,dilations:i},b=V.runKernel(Uu,m,y);return d?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const eb=P({dilation2d_:c_});function Lo(e,t){const n=e.length,s=[];for(let i=0;i1&&a===1&&s.unshift(o)}return s}function on(e,t){const n=[];for(let s=0;s1)&&n.unshift(o)}return n}function tt(e,t){const n=[],s=Math.max(e.length,t.length);for(let i=0;ia.equal(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,hx)}const Zs=P({equal_:l_});function h_(e,t,n){const s=W(t,"a","where"),i=W(n,"b","where"),o=W(e,"condition","where","bool"),a=tt(s.shape,i.shape),c=jl(s,a),h=jl(i,a);o.rank===1&&k(o.shape[0]===s.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),o.rank!==1&&dt(o.shape,h.shape,"Error in where: ");const d=(y,b)=>{const w=y.select(o,c,h);return b([o]),w},m={condition:o,t:c,e:h};return V.runKernelFunc(d,m,null,Pg)}const _n=P({where_:h_});function u_(e){const t=W(e,"x","zerosLike"),n={x:t};return V.runKernelFunc(s=>s.zerosLike(t),n,null,jg)}const Qe=P({zerosLike_:u_});function d_(e,t){let n=W(e,"a","div"),s=W(t,"b","div");[n,s]=Bt(n,s);const i=_e(n,s),o=Qe(i),a=Zs(s,o);return _n(a,o,i)}const tb=P({divNoNan_:d_});function p_(e,t){const n=W(e,"t1","dot"),s=W(t,"t2","dot");k((n.rank===1||n.rank===2)&&(s.rank===1||s.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`);const i=n.rank===1?n.size:n.shape[1],o=s.rank===1?s.size:s.shape[0];if(k(i===o,()=>`Error in dot: inner dimensions of inputs must match, but got ${i} and ${o}.`),n.rank===1&&s.rank===1){const a=K(n,[1,-1]),c=K(s,[-1,1]),h=at(a,c);return K(h,[])}else if(n.rank===1&&s.rank===2){const a=K(n,[1,-1]),c=K(s,[s.shape[0],s.shape[1]]),h=at(a,c);return K(h,[h.size])}else if(n.rank===2&&s.rank===1){const a=K(s,[-1,1]),c=at(n,a);return K(c,[c.size])}else{const a=K(s,[s.shape[0],s.shape[1]]),c=at(n,a);return c}}const WT=P({dot_:p_});function m_(e){const t=W(e,"x","elu"),n=(i,o)=>{const a=i.elu(t);return o([a]),a},s={x:t};return V.runKernelFunc(n,s,null,tl)}const So=P({elu_:m_});function f_(e){let t=W(e,"x","erf");k(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=ve(t,"float32"));const n={x:t};return V.runKernelFunc((s,i)=>{const o=s.erf(t);return i([t]),o},n,null,nl)}const 
nb=P({erf_:f_});function g_(e){const t=W(e,"x","exp"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.exp(t);return i([o]),o},n,null,sl)}const Ls=P({exp_:g_});function y_(e,t=0){const n=null,s=W(e,"x","expandDims",n);k(t<=s.rank,()=>"Axis must be <= rank of the tensor");const i=s.shape.slice();return t<0&&(k(-(s.rank+1)<=t,()=>`Axis must be in the interval [${-(s.rank+1)}, ${s.rank}]`),t=s.rank+t+1),i.splice(t,0,1),K(s,i)}const Hn=P({expandDims_:y_});function b_(e){const t=W(e,"x","expm1"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.expm1(t);return i([t]),o},n,null,il)}const sb=P({expm1_:b_});function w_(e,t){const n=null,s=W(e,"x","tile",n);k(s.rank===t.length,()=>`Error in transpose: rank of input ${s.rank} must match length of reps ${t}.`);const i=(h,d)=>{const m=h.tile(s,t);return d([s]),m},o=[s],a={x:s},c={reps:t};return V.runKernelFunc(i,a,null,Yg,c,o)}const Er=P({tile_:w_});function L_(e,t,n,s="float32"){t==null&&(t=e);const i=Ze([e,t],s),o=e<=t?e:t;for(let c=0;ci.fill(e,t,n),{},null,ux,s)}function S_(e){const t=W(e,"x","floor"),n={x:t};return V.runKernelFunc(s=>s.floor(t),n,null,rl)}const Ra=P({floor_:S_});const ib=30;function Jl(e){return e<=ib?e:od(e,Math.floor(Math.sqrt(e)))}function I_(e,t){let n=!1,s;for(e<=ib?(s=e,n=!0):s=od(e,Math.floor(Math.sqrt(e)));!n;)s>t||s===e?n=!0:s=od(e,s+1);return s}function x_(e,t,n){const s=[],i=e.length;for(let o=0;o{const m=ft(n,s.shape)[0],y=$T(s,i,m),b=h.gather(s,K(i,[i.size]),m);return d([s,i]),K(b,y.outputShape)};return V.runKernelFunc(c,o,null,bg,a)}const Oa=P({gather_:A_});function v_(e,t){let n=W(e,"a","greater"),s=W(t,"b","greater");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=a=>a.greater(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,px)}const Ss=P({greater_:v_});function N_(e,t){let n=W(e,"a","greaterEqual"),s=W(t,"b","greaterEqual");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=(a,c)=>{const h=a.greaterEqual(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,wg)}const Ki=P({greaterEqual_:N_});function C_(e){const t=W(e,"input","imag"),n=i=>i.imag(t),s={input:t};return V.runKernelFunc(n,s,null,Sg)}const Ea=P({imag_:C_});function R_(e){const t=W(e,"x","isFinite"),n={x:t};return V.runKernelFunc(s=>s.isFinite(t),n,null,cl)}const UT=P({isFinite_:R_});function O_(e){const t=W(e,"x","isInf"),n={x:t};return V.runKernelFunc(s=>s.isInf(t),n,null,ll)}const BT=P({isInf_:O_});function E_(e){const t=W(e,"x","isNaN"),n={x:t};return V.runKernelFunc(s=>s.isNaN(t),n,null,hl)}const MT=P({isNaN_:E_});function D_(e,t){let n=W(e,"a","maximum"),s=W(t,"b","maximum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),tt(n.shape,s.shape);const i=(a,c)=>{const h=a.maximum(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,Tg)}const _s=P({maximum_:D_});function Ne(e,t){if((wn(e)&&t!=="string"||Array.isArray(e))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&wn(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const n=[],s=[];return vr(e,n,s,t)}function k_(e,t=.2){const n=W(e,"x","leakyRelu");return _s(X(Ne(t),n),n)}const Dd=P({leakyRelu_:k_});function F_(e,t){let n=W(e,"a","less"),s=W(t,"b","less");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=a=>a.less(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,mx)}const Zl=P({less_:F_});function __(e,t){let n=W(e,"a","lessEqual"),s=W(t,"b","lessEqual");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=(a,c)=>{const 
h=a.lessEqual(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,fx)}const Dr=P({lessEqual_:__});function PT(e,t,n){if(n<=0)throw new Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return V.runKernelFunc(i=>i.linspace(e,t,n),{},null,gx,s)}function W_(e,t=5,n=1,s=1,i=.5){const o=W(e,"x","localResponseNormalization");k(o.rank===4||o.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got - rank ${o.rank}.`),k(Ut(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let a=o,c=!1;o.rank===3&&(c=!0,a=K(o,[1,o.shape[0],o.shape[1],o.shape[2]]));const h=(b,w)=>{const L=b.localResponseNormalization4D(a,t,n,s,i);return w([a,L]),L},d={x:a},m={depthRadius:t,bias:n,alpha:s,beta:i},y=V.runKernelFunc(h,d,null,xg,m);return c?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const rb=P({localResponseNormalization_:W_});function $_(e){const t=W(e,"x","log"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.log(t);return i([t]),o},n,null,ul)}const ts=P({log_:$_});function U_(e){const t=W(e,"x","log1p"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.log1p(t);return i([t]),o},n,null,dl)}const kd=P({log1p_:U_});function B_(e){return k(xr(e),()=>"The f passed in grad(f) must be a function"),(t,n)=>{const s=W(t,"x","tf.grad",null),i=n!=null?W(n,"dy","tf.grad"):null;return V.tidy(()=>{const{value:o,grads:a}=V.gradients(()=>e(s),[s],i);return i!=null&&dt(o.shape,i.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),Fd(a),a[0]})}}function M_(e){return k(xr(e),()=>"The f passed in grads(f) must be a function"),(t,n)=>{k(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");const s=Ul(t,"args","tf.grads",null),i=n!=null?W(n,"dy","tf.grads"):null;return V.tidy(()=>{const{value:o,grads:a}=V.gradients(()=>e(...s),s,i);return i!=null&&dt(o.shape,i.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Fd(a),a})}}function P_(e){return k(xr(e),()=>"The f passed in valueAndGrad(f) must be a function"),(t,n)=>{k(t instanceof Q,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),k(n==null||n instanceof Q,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");const{grads:s,value:i}=V.gradients(()=>e(t),[t],n);return Fd(s),{grad:s[0],value:i}}}function z_(e){return k(xr(e),()=>"The f passed in valueAndGrads(f) must be a function"),(t,n)=>{k(Array.isArray(t)&&t.every(i=>i instanceof Q),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),k(n==null||n instanceof Q,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");const s=V.gradients(()=>e(...t),t,n);return n!=null&&dt(s.value.shape,n.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Fd(s.grads),s}}function ob(e,t){k(xr(e),()=>"The f passed in variableGrads(f) must be a function"),k(t==null||Array.isArray(t)&&t.every(d=>d instanceof Wl),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");const n=t!=null;if(!n){t=[];for(const d in V.registeredVariables)t.push(V.registeredVariables[d])}const s=n?t.filter(d=>!d.trainable):null,i=t.length;t=t.filter(d=>d.trainable),k(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${i} variables is trainable.`);const o=!0,{value:a,grads:c}=V.gradients(e,t,null,o);k(c.some(d=>d!=null),()=>"Cannot 
find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize()."),k(a.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);const h={};return t.forEach((d,m)=>{c[m]!=null&&(h[d.name]=c[m])}),s!=null&&s.forEach(d=>h[d.name]=null),{value:a,grads:h}}function Ni(e){return V.customGrad(e)}function Fd(e){const t=e.filter(n=>n==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that - the f you passed encloses all operations that lead from x to y.`)}function G_(e){const t=W(e,"x","neg"),n={x:t};return V.runKernelFunc(s=>s.neg(t),n,null,Rg)}const Pt=P({neg_:G_});function V_(e){const t=W(e,"x","softplus"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.softplus(t);return i([t]),o},n,null,Tl)}const Da=P({softplus_:V_});function Y_(e){const t=W(e,"x","logSigmoid"),n=Ni(s=>{const i=Pt(Da(Pt(s))),o=a=>{const c=X(a,vi(Pt(s)));return c};return{value:i,gradFunc:o}});return n(t)}const zT=P({logSigmoid_:Y_});function H_(e,t=null,n=!1){const s=W(e,"x","max"),i=(c,h)=>{const d=ft(t,s.shape);let m=d;const y=kn(m,s.rank);let b=s;y!=null&&(b=Me(s,y),m=ws(m.length,b.rank));const w=c.max(b,m);y!=null&&b.dispose();let L=w;if(n){const T=Rn(L.shape,ft(t,s.shape));L=K(L,T),w.dispose()}return h([s,L]),L},o={x:s},a={reductionIndices:t,keepDims:n};return V.runKernelFunc(i,o,null,pl,a)}const qn=P({max_:H_});function q_(e,t){let n=W(e,"a","sub"),s=W(t,"b","sub");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.subtract(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,vl)}const Ce=P({sub_:q_});function j_(e,t=null,n=!1){let s=W(e,"x","sum");s.dtype==="bool"&&(s=ve(s,"int32"));const i=(c,h)=>{h([s]);const d=ft(t,s.shape),m=kn(d,s.rank);let y=d,b=s;m!=null&&(b=Me(s,m),y=ws(y.length,s.rank));let w=c.sum(b,y);if(n){const L=Rn(w.shape,d);w=K(w,L)}return w},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,zg,a)}const Ue=P({sum_:j_});function K_(e,t=-1){const n=W(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and axis was ${t}`);const s=(a,c)=>{const h=!0,d=qn(e,t,!0),m=Ce(e,d),y=Ce(ve(m,"float32"),ts(Ue(Ls(m),t,h)));return c([y]),y},i={logits:n},o={axis:t};return V.runKernelFunc(s,i,null,Ig,o)}const _d=P({logSoftmax_:K_});function X_(e,t=null,n=!1){const s=W(e,"x","logSumExp"),i=ft(t,s.shape),o=qn(s,i,!0),a=Ce(s,o),c=Ls(a),h=Ue(c,i),d=ts(h),m=be(K(o,d.shape),d);if(n){const y=Rn(m.shape,i);return K(m,y)}return m}const ab=P({logSumExp_:X_});function J_(e,t){const n=W(e,"a","logicalAnd","bool"),s=W(t,"b","logicalAnd","bool");tt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalAnd(n,s),i,null,yx)}const Ws=P({logicalAnd_:J_});function Z_(e){const t=W(e,"x","logicalNot","bool"),n={x:t};return V.runKernelFunc(s=>s.logicalNot(t),n,null,zu)}const Ql=P({logicalNot_:Z_});function Q_(e,t){const n=W(e,"a","logicalOr","bool"),s=W(t,"b","logicalOr","bool");tt(n.shape,s.shape);const i={a:n,b:s};return V.runKernelFunc(o=>o.logicalOr(n,s),i,null,bx)}const Wd=P({logicalOr_:Q_});function eW(e,t){const n=W(e,"a","logicalXor","bool"),s=W(t,"b","logicalXor","bool");return tt(n.shape,s.shape),Ws(Wd(e,t),Ql(Ws(e,t)))}const GT=P({logicalXor_:eW});function tW(e,t,n,s,i){const o=W(e,"x","maxPool"),a=1;let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(c.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${c.rank}.`),k(rn(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const T=Fn(c.shape,t,n,1,s,i);let A;return T.filterWidth===1&&T.filterHeight===1&&ot(T.inShape,T.outShape)?A=c.clone():A=w.maxPool(c,T),L([c,A]),A},m={x:c},y={filterSize:t,strides:n,pad:s,dimRoundingMode:i},b=V.runKernelFunc(d,m,null,ml,y);return h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const eh=P({maxPool_:tW});function nW(e,t=[1,1,1],n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:nn("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","maxPool3d");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),k(h.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${h.rank}.`),k(o==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),k(rn(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),i!=null&&k(Ut(s),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,T)=>{a==null&&(a=[1,1,1]);const A=Gl(h.shape,t,n,a,s,i,o),N=L.maxPool3d(h,A);return T([h,N]),N},y={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a},w=V.runKernelFunc(m,y,null,Ag,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const cb=P({maxPool3d_:nW});function sW(e,t,n,s,i=!1){const o=W(e,"x","maxPoolWithArgmax"),a={x:o},c={filterSize:t,strides:n,pad:s,includeBatchInIndex:i},h=V.runKernel(Vu,a,c);return{result:h[0],indexes:h[1]}}const VT=P({maxPoolWithArgmax_:sW});function ct(e,t="float32"){if(t==="complex64"){const s=ct(e,"float32"),i=ct(e,"float32");return xi(s,i)}const n=wa(we(e),t);return V.makeTensor(n,e,t)}function Qs(e,t="float32"){if(t==="complex64"){const s=Qs(e,"float32"),i=ct(e,"float32");return xi(s,i)}const n=oy(we(e),t);return V.makeTensor(n,e,t)}function iW(e,t=null,n=!1){const s=W(e,"x","mean"),i=ft(t,s.shape),o=Cn(s.shape,i),a=o[1],c=we(a),h=Ni(d=>{const m=Ne(c),y=m.dtype===d.dtype?d:ve(d,m.dtype),b=_e(y,m),w=Ue(b,t,n),L=T=>{const A=d.shape.slice();i.forEach(D=>{A[D]=1});const N=K(T,A),E=_e(X(N,Qs(d.shape,"float32")),c);return E};return{value:w,gradFunc:L}});return h(s)}const zt=P({mean_:iW});function rW(e,t=null,n=!1){const s=W(e,"x","min"),i=(c,h)=>{const d=ft(t,s.shape);let m=d;const y=kn(m,s.rank);let b=s;y!=null&&(b=Me(s,y),m=ws(m.length,s.rank));const w=c.min(b,m);y!=null&&b.dispose();let L=w;if(n){const T=Rn(L.shape,d);L=K(w,T),w.dispose()}return h([s,L]),L},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,vg,a)}const ka=P({min_:rW});function oW(e,t){let n=W(e,"a","minimum"),s=W(t,"b","minimum");[n,s]=Bt(n,s),n.dtype==="bool"&&(n=ve(n,"int32"),s=ve(s,"int32")),tt(n.shape,s.shape);const i=(a,c)=>{const h=a.minimum(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,Ng)}const Io=P({minimum_:oW});function aW(e,t){let n=W(e,"a","mod"),s=W(t,"b","mod");[n,s]=Bt(n,s);const i=(a,c)=>{const h=a.mod(n,s);return c([n,s]),h},o={a:n,b:s};return V.runKernelFunc(i,o,null,Cg)}const $d=P({mod_:aW});function cW(e){const t=W(e,"x","square"),n={},s=[t],i=[];return V.runKernelFunc((o,a)=>(a([t]),o.square(t)),{x:t},null,"Square",n,s,i)}const wt=P({square_:cW});function lW(e,t=null,n=!1){e=W(e,"x","moments");const s=ft(t,e.shape),i=zt(e,s,n);let o=i.shape;n||(o=Rn(i.shape,s));const a=wt(Ce(ve(e,"float32"),K(i,o))),c=zt(a,s,n);return{mean:i,variance:c}}const Ud=P({moments_:lW});function hW(e,t,n,s){const i=W(t,"data","multiRNNCell"),o=Ul(n,"c","multiRNNCell"),a=Ul(s,"h","multiRNNCell");let c=i;const h=[];for(let y=0;y2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);n=n||Math.random();const c=a===1?K(i,[1,-1]):i,h=V.runKernelFunc(d=>d.multinomial(c,s,t,n),{logits2D:c});return a===1?K(h,[h.size]):h}const YT=P({multinomial_:dW});function pW(e,t){let n=W(e,"a","notEqual"),s=W(t,"b","notEqual");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=a=>a.notEqual(n,s),o={a:n,b:s};return V.runKernelFunc(i,o,null,Yu)}const kr=P({notEqual_:pW});function mW(e){const t=W(e,"input","real"),n=i=>i.real(t),s={input:t};return V.runKernelFunc(n,s,null,_g)}const xo=P({real_:mW});function fW(e){const t=W(e,"x","onesLike"),n=(i,o)=>{if(t.dtype==="complex64"){const a=On(xo(t)),c=Qe(Ea(t));return xi(a,c)}return i.onesLike(t)},s={x:t};return V.runKernelFunc(n,s,null,Eg)}const On=P({onesLike_:fW});function gW(e,t){const 
n=W(e,"v1","outerProduct"),s=W(t,"v2","outerProduct");k(n.rank===1&&s.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${n.rank} and ${s.rank}.`);const i=K(n,[-1,1]),o=K(s,[1,-1]);return at(i,o)}const yW=P({outerProduct_:gW});function bW(e,t,n=0){const s=W(e,"x","pad");if(s.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const i=(c,h)=>(h([s]),c.pad(s,t,n)),o={paddings:t,constantValue:n},a={x:s};return V.runKernelFunc(i,a,null,ju,o)}const Ci=P({pad_:bW});function wW(e,t,n=0){return k(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),Ci(e,[t],n)}const LW=P({pad1d_:wW});function SW(e,t,n=0){return k(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),Ci(e,t,n)}const IW=P({pad2d_:SW});function xW(e,t,n=0){return k(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),Ci(e,t,n)}const TW=P({pad3d_:xW});function AW(e,t,n=0){return k(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),Ci(e,t,n)}const vW=P({pad4d_:AW});function NW(e,t,n){const s=W(e,"x","spaceToBatchND");k(s.rank>=1+t.length,()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`),k(n.length===t.length,()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`),k(s.shape.reduce((c,h,d)=>d>0&&d<=t.length?c&&(h+n[d-1][0]+n[d-1][1])%t[d-1]===0:c,!0),()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`);const i=c=>c.spaceToBatchND(s,t,n),o={x:s},a={blockShape:t,paddings:n};return V.runKernelFunc(i,o,null,Xu,a)}const th=P({spaceToBatchND_:NW});function CW(e,t,n,s,i,o){i==null&&(i=[1,1]),o==null&&(o=1),s===0&&(s="valid");const a=W(e,"x","maxPool");let c=a,h=!1;a.rank===3&&(h=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]])),k(rn(o,i),()=>`Error in pool: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${i}'`);const d=Fn(c.shape,t,o,i,s),m=[d.dilationHeight,d.dilationWidth];let y;s==="same"?y=OW([d.filterHeight,d.filterWidth],m):y=[[0,0],[0,0]];const b=m[0]===1&&m[1]===1,[w,L]=RW([d.inHeight,d.inWidth],m,y),T=b?s:"valid",A=b?c:th(c,m,w),N=n==="avg"?()=>Hl(A,t,o,T):()=>eh(A,t,o,T),E=N(),D=b?E:ql(E,m,L);return h?K(D,[D.shape[1],D.shape[2],D.shape[3]]):D}function RW(e,t,n){const s=n.map(m=>m[0]),i=n.map(m=>m[1]),o=e.concat(s,i),a=t.map((m,y)=>(m-o[y]%m)%m),c=i.map((m,y)=>m+a[y]),h=t.map((m,y)=>[s[y],c[y]]),d=t.map((m,y)=>[0,a[y]]);return[h,d]}function OW(e,t){const n=e.map((a,c)=>a+(a-1)*(t[c]-1)),s=n.map(a=>a-1),i=s.map(a=>Math.floor(a/2)),o=s.map((a,c)=>a-i[c]);return s.map((a,c)=>[i[c],o[c]])}const HT=P({pool_:CW});function EW(e,t){let n=W(e,"base","pow"),s=W(t,"exp","pow");[n,s]=Bt(n,s);const i={a:n,b:s},o=(a,c)=>{const h=a.pow(n,s);return c([n,s,h]),h};return V.runKernelFunc(o,i,null,kg)}const ei=P({pow_:EW});function DW(e,t){const n=W(e,"x","prelu"),s=W(t,"alpha","prelu"),i=(a,c)=>{const h=a.prelu(n,s);return c([n,s]),h},o={x:n,alpha:s};return V.runKernelFunc(i,o,null,Fg)}const nh=P({prelu_:DW});function kW(e,t=null,n=!1){let s=W(e,"x","prod");const i=c=>{s.dtype==="bool"&&(s=ve(s,"int32"));const h=ft(t,s.shape),d=kn(h,s.rank);let m=h,y=s;d!=null&&(y=Me(s,d),m=ws(m.length,s.rank));let b=c.prod(y,m);if(n){const w=Rn(b.shape,h);b=K(b,w)}return b},o={x:s},a={axis:t,keepDims:n};return V.runKernelFunc(i,o,null,Sx,a)}const Bd=P({prod_:kW});function FW(e,t,n){const s=we(e);let i=null;if(n==null||n==="float32")i=new Float32Array(s);else if(n==="int32")i=new Int32Array(s);else if(n==="bool")i=new Uint8Array(s);else throw new Error(`Unknown data type ${n}`);for(let o=0;o>>0,b-=h,b*=h,h=b>>>0,b-=h,h+=b*4294967296}return(h>>>0)*23283064365386963e-26};return d}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.alea=a})(Fa,e,!1)}),$W=To(function(e){(function(t,n,s){function i(c){var h=this,d="";h.x=0,h.y=0,h.z=0,h.w=0,h.next=function(){var y=h.x^h.x<<11;return h.x=h.y,h.y=h.z,h.z=h.w,h.w^=h.w>>>19^y^y>>>8},c===(c|0)?h.x=c:d+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},y.int32=d.next,y.quick=y,m&&(typeof m=="object"&&o(m,d),y.state=function(){return o(d,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor128=a})(Fa,e,!1)}),UW=To(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var y=h.x^h.x>>>2;return h.x=h.y,h.y=h.z,h.z=h.w,h.w=h.v,(h.d=h.d+362437|0)+(h.v=h.v^h.v<<4^(y^y<<1))|0},h.x=0,h.y=0,h.z=0,h.w=0,h.v=0,c===(c|0)?h.x=c:d+=c;for(var m=0;m>>4),h.next()}function o(c,h){return h.x=c.x,h.y=c.y,h.z=c.z,h.w=c.w,h.v=c.v,h.d=c.d,h}function a(c,h){var d=new i(c),m=h&&h.state,y=function(){return(d.next()>>>0)/4294967296};return y.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},y.int32=d.next,y.quick=y,m&&(typeof m=="object"&&o(m,d),y.state=function(){return o(d,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorwow=a})(Fa,e,!1)}),BW=To(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.x,y=h.i,b,w,L;return b=m[y],b^=b>>>7,w=b^b<<24,b=m[y+1&7],w^=b^b>>>10,b=m[y+3&7],w^=b^b>>>3,b=m[y+4&7],w^=b^b<<7,b=m[y+7&7],b=b^b<<13,w^=b^b<<9,m[y]=w,h.i=y+1&7,w};function d(m,y){var b,w,L=[];if(y===(y|0))w=L[0]=y;else for(y=""+y,b=0;b0;--b)m.next()}d(h,c)}function o(c,h){return h.x=c.x.slice(),h.i=c.i,h}function 
a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,y=function(){return(d.next()>>>0)/4294967296};return y.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},y.int32=d.next,y.quick=y,m&&(m.x&&o(m,d),y.state=function(){return o(d,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorshift7=a})(Fa,e,!1)}),MW=To(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.w,y=h.X,b=h.i,w,L;return h.w=m=m+1640531527|0,L=y[b+34&127],w=y[b=b+1&127],L^=L<<13,w^=w<<17,L^=L>>>15,w^=w>>>12,L=y[b]=L^w,h.i=b,L+(m^m>>>16)|0};function d(m,y){var b,w,L,T,A,N=[],E=128;for(y===(y|0)?(w=y,y=null):(y=y+"\0",w=0,E=Math.max(E,y.length)),L=0,T=-32;T>>15,w^=w<<4,w^=w>>>13,T>=0&&(A=A+1640531527|0,b=N[T&127]^=w+A,L=b==0?L+1:0);for(L>=128&&(N[(y&&y.length||0)&127]=-1),L=127,T=4*128;T>0;--T)w=N[L+34&127],b=N[L=L+1&127],w^=w<<13,b^=b<<17,w^=w>>>15,b^=b>>>12,N[L]=w^b;m.w=A,m.X=N,m.i=L}d(h,c)}function o(c,h){return h.i=c.i,h.w=c.w,h.X=c.X.slice(),h}function a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,y=function(){return(d.next()>>>0)/4294967296};return y.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},y.int32=d.next,y.quick=y,m&&(m.X&&o(m,d),y.state=function(){return o(d,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor4096=a})(Fa,e,!1)}),PW=To(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var y=h.b,b=h.c,w=h.d,L=h.a;return y=y<<25^y>>>7^b,b=b-w|0,w=w<<24^w>>>8^L,L=L-y|0,h.b=y=y<<20^y>>>12^b,h.c=b=b-w|0,h.d=w<<16^b>>>16^L,h.a=L-y|0},h.a=0,h.b=0,h.c=2654435769|0,h.d=1367130551,c===Math.floor(c)?(h.a=c/4294967296|0,h.b=c|0):d+=c;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},y.int32=d.next,y.quick=y,m&&(typeof m=="object"&&o(m,d),y.state=function(){return o(d,{})}),y}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.tychei=a})(Fa,e,!1)}),Ao=To(function(e){(function(t,n){var s=this,i=256,o=6,a=52,c="random",h=n.pow(i,o),d=n.pow(2,a),m=d*2,y=i-1,b;function w(F,_,B){var $=[];_=_==!0?{entropy:!0}:_||{};var H=N(A(_.entropy?[F,D(t)]:F==null?E():F,3),$),q=new L($),J=function(){for(var re=q.g(o),ce=h,ue=0;re=m;)re/=2,ce/=2,ue>>>=1;return(re+ue)/ce};return J.int32=function(){return q.g(4)|0},J.quick=function(){return q.g(4)/4294967296},J.double=J,N(D(q.S),t),(_.pass||B||function(re,ce,ue,he){return he&&(he.S&&T(he,q),re.state=function(){return T(q,{})}),ue?(n[c]=re,ce):re})(J,H,"global"in _?_.global:this==n,_.state)}n["seed"+c]=w;function L(F){var _,B=F.length,$=this,H=0,q=$.i=$.j=0,J=$.S=[];for(B||(F=[B++]);H=1||o===0);const a=Math.sqrt(-2*Math.log(o)/o);e=this.mean+this.stdDev*s*a,t=this.mean+this.stdDev*i*a,(!this.truncated||this.isValidTruncated(e))&&(n=!0)}return(!this.truncated||this.isValidTruncated(t))&&(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return this.dtype==null||this.dtype==="float32"?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class GW{constructor(e,t,n,s){this.alpha=e,this.beta=1/t,this.dtype=n;const i=s||Math.random();this.randu=_a(i.toString()),this.randn=new lb(0,1,n,!1,this.randu()),e<1?this.d=e+2/3:this.d=e-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let e,t,n,s,i,o;for(;;){do 
s=this.randn.nextValue(),o=1+this.c*s;while(o<=0);if(o*=o*o,e=s*s,t=1-.331*e*e,n=.5*e+this.d*(1-o+Math.log(o)),i=this.randu(),ithis.dtype==null||this.dtype==="float32",this.min=e,this.range=t-e,this.dtype=n,s==null&&(s=Math.random()),typeof s=="number"&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=_a(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function PZ(e){const t=e.length,n=qW(e),s=HW(e),i=t/6*(Math.pow(n,2)+.25*Math.pow(s-3,2)),o=5.991;if(i>o)throw new Error(`Invalid p-value for JB: ${i}`)}function zZ(e,t,n,s){s==null&&(s=Ld());const i=hb(e);Fy(i,t,s),Fy(YW(e,i),n,s)}function hb(e){let t=0;for(let n=0;n{const a=e===t,c=e1;if(a||c||h)return ct([0],s);const d=Math.abs(Math.ceil((t-e)/n)),m=wa(d,s);t{const o=s.reciprocal(t);return i([t]),o},n,null,gl)}const db=P({reciprocal_:ZW});function QW(e){const t=W(e,"x","relu"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu(t)),s={x:t};return V.runKernelFunc(n,s,null,Wg)}const Ri=P({relu_:QW});function e$(e){const t=W(e,"x","relu6"),n=(i,o)=>(o([t]),t.dtype==="bool"?ve(t,"int32"):i.relu6(t)),s={x:t};return V.runKernelFunc(n,s,null,Bg)}const pb=P({relu6_:e$});function t$(e,t){const n=W(e,"x","reverse"),s=a=>{const c=ft(t,n.shape);if(n.rank===0)return Cr(n);const h=a.reverse(n,c);return K(h,n.shape)},i={x:n},o={dims:t};return V.runKernelFunc(s,i,null,Mg,o)}const Is=P({reverse_:t$});function n$(e){const t=W(e,"x","reverse");return k(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),Is(t,0)}const s$=P({reverse1d_:n$});function i$(e,t){const n=W(e,"x","reverse");return k(n.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${n.rank}.`),Is(n,t)}const r$=P({reverse2d_:i$});function o$(e,t){const n=W(e,"x","reverse");return k(n.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${n.rank}.`),Is(n,t)}const a$=P({reverse3d_:o$});function c$(e,t){const n=W(e,"x","reverse");return k(n.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${n.rank}.`),Is(n,t)}const l$=P({reverse4d_:c$});function h$(e){const t=W(e,"x","round"),n={x:t};return V.runKernelFunc(s=>s.round(t),n,null,bl)}const mb=P({round_:h$});function u$(e){const t=W(e,"x","rsqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.rsqrt(t);return i([t]),o},n,null,wl)}const Md=P({rsqrt_:u$});function d$(e){const t=W(e,"x","selu"),n=(i,o)=>{const a=i.selu(t);return o([t]),a},s={x:t};return V.runKernelFunc(n,s,null,Ll)}const Pd=P({selu_:d$});function p$(e,t,n,s,i,o=[1,1],a="NHWC"){const c=W(e,"x","separableConv2d"),h=W(t,"depthwiseFilter","separableConv2d"),d=W(n,"pointwiseFilter","separableConv2d");let m=c,y=!1;if(c.rank===3&&(y=!0,m=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),a==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");k(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),k(h.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${h.rank}.`),k(d.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${h.rank}.`),k(d.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${d.shape[0]}.`),k(d.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${d.shape[1]}.`);const 
b=h.shape[2],w=h.shape[3];k(d.shape[2]===b*w,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*w}, but got ${d.shape[2]}.`);const L=wo(m,h,s,i,a,o),T=1,A=ji(L,d,T,"valid",a);return y?K(A,[A.shape[1],A.shape[2],A.shape[3]]):A}const fb=P({separableConv2d_:p$});async function m$(e,t){const n=W(e,"x","setdiff1d"),s=W(t,"y","setdiff1d");k(n.dtype===s.dtype,()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`),k(n.rank===1,()=>`x should be 1D tensor, but got x (${n.shape}).`),k(s.rank===1,()=>`y should be 1D tensor, but got y (${s.shape}).`);const i=await n.data(),o=await s.data(),a=new Set(o);let c=0;for(let m=0;ms.sign(t),n,null,Il)}const gb=P({sign_:f$});function g$(e){const t=W(e,"x","sin"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sin(t);return i([t]),o},n,null,ma)}const zd=P({sin_:g$});function y$(e){const t=W(e,"x","sinh"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sinh(t);return i([t]),o},n,null,Sl)}const Gd=P({sinh_:y$});function b$(e,t,n){const s=W(e,"x","slice1d");return k(s.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`),nt(s,[t],[n])}const Vd=P({slice1d_:b$});function w$(e,t,n){const s=W(e,"x","slice2d");return k(s.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`),nt(s,t,n)}const yb=P({slice2d_:w$});function L$(e,t,n){const s=W(e,"x","slice3d");return k(s.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`),nt(s,t,n)}const Yd=P({slice3d_:L$});function S$(e,t,n){const s=W(e,"x","slice4d");return k(s.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`),nt(s,t,n)}const ih=P({slice4d_:S$});function I$(e,t=-1){const n=W(e,"logits","softmax","float32");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},i={dim:t};return V.runKernelFunc((o,a)=>{const c=o.softmax(n,t);return a([c]),c},s,null,Vg,i)}const No=P({softmax_:I$});function x$(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=e.as2D(i,s),a=n.fft(o);return a.reshape(e.shape)},t,null,gg)}const rh=P({fft_:x$});function T$(e){k(e.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`);const t={input:e};return V.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=K(e,[i,s]),a=n.ifft(o);return K(a,e.shape)},t,null,Lg)}const Wa=P({ifft_:T$});function A$(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const i=K(e,[n,t]);s=Wa(i)}else{const i=[n,2*(t-1)],o=K(xo(e),[n,t]),a=K(Ea(e),[n,t]),c=Is(nt(o,[0,1],[n,t-2]),1),h=X(Is(nt(a,[0,1],[n,t-2]),1),Ne(-1)),d=Mt([o,c],1),m=Mt([a,h],1),y=K(xi(d,m),[i[0],i[1]]);s=Wa(y)}if(s=xo(s),e.rank===3&&e.shape[0]!==0){const i=s,o=e.shape[0];s=K(s,[o,s.shape[0]/o,s.shape[1]]),i.dispose()}return s}const Hd=P({irfft_:A$});function jT(e,t,n=0){let s=[];if(typeof t=="number")k(e.shape[n]%t===0,()=>"Number of splits must evenly divide the axis."),s=new Array(t).fill(e.shape[n]/t);else{const i=t.reduce((a,c)=>(c===-1&&(a+=1),a),0);k(i<=1,()=>"There should be only one negative value in split array.");const o=t.indexOf(-1);if(o!==-1){const a=t.reduce((c,h)=>h>0?c+h:c);t[o]=e.shape[n]-a}k(e.shape[n]===t.reduce((a,c)=>a+c),()=>"The sum of sizes must match the size of the axis dimension."),s=t}return s}function v$(e,t,n=0){const s=W(e,"x","split"),i=(c,h)=>{const d=ft(n,s.shape)[0],m=jT(s,t,d);return c.split(s,m,d)},o={x:s},a={numOrSizeSplits:t,axis:n};return V.runKernelFunc(i,o,null,Gg,a)}const ss=P({split_:v$});function N$(e,t){k(e.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${e.dtype}`);let n=e.shape[e.shape.length-1];const s=e.size/n;let i;if(t!=null&&t0),T=e.shape.map(A=>A);T[e.shape.length-1]=t,i=nt(e,L,T),n=t}else if(t!=null&&t>n){const L=e.shape.map(T=>T);L[e.shape.length-1]=t-n,i=Mt([e,ct(L)],e.shape.length-1),n=t}else i=e;const o=Qe(i),a=K(xi(i,o),[s,n]),c=rh(a),h=Math.floor(n/2)+1,d=xo(c),m=Ea(c),y=ss(d,[h,n-h],d.shape.length-1),b=ss(m,[h,n-h],m.shape.length-1),w=i.shape.slice();return w[i.shape.length-1]=h,K(xi(y[0],b[0]),w)}const oh=P({rfft_:N$});function C$(e){const t=W(e,"x","sqrt"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.sqrt(t);return i([t]),o},n,null,Al)}const Ln=P({sqrt_:C$});function R$(e,t){let n=W(e,"a","squaredDifference"),s=W(t,"b","squaredDifference");[n,s]=Bt(n,s),tt(n.shape,s.shape);const i=(c,h)=>{const d=c.squaredDifference(n,s);return h([n,s]),d},o={a:n,b:s},a={};return V.runKernelFunc(i,o,null,fa,a)}const ah=P({squaredDifference_:R$});function O$(e,t){const n=W(e,"x","squeeze");return K(n,Sr(n.shape,t).newShape)}const Fr=P({squeeze_:O$});function E$(e,t=0){const n=Ul(e,"tensors","stack");if(k(n.length>=1,()=>"Pass at least one tensor to tf.stack"),n.length===1)return Hn(n[0],t);const s=n[0].rank,i=n[0].shape,o=n[0].dtype;k(t<=s,()=>"Axis must be <= rank of the tensor"),n.forEach(c=>{dt(i,c.shape,"All tensors passed to stack must have matching shapes"),k(o===c.dtype,()=>"All tensors passed to stack must have matching dtypes")});const a=n.map(c=>Hn(c,t));return Mt(a,t)}const is=P({stack_:E$});function D$(e,t=0){const n=W(e,"x","step"),s={x:n},i={alpha:t};return 
V.runKernelFunc(o=>o.step(n,t),s,null,Rl,i)}const $a=P({step_:D$});function k$(e,t,n,s,i=0,o=0,a=0,c=0,h=0){let d=W(e,"x","stridedSlice");const m=w=>{s==null&&(s=new Array(t.length));const L=yd(a);if(L.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(a!==0&&c!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(a!==0&&h!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const T=d.rank-t.length,A=yd(c),N=d.shape.slice();A.forEach(J=>{t[J]=0,n[J]=1,N.splice(J,0,1)}),d=K(d,N);const{begin:E,end:D,strides:F}=fT(d.shape,L,T,t,n,s,i,o,a);t=E,n=D,s=F;const _=yd(h);_.forEach(J=>{n[J]=t[J]+1,s[J]=1});const B=bd(t,n,s),$=B.filter((J,re)=>_.indexOf(re)===-1),H=s.every(J=>J===1);if(H)return K(nt(d,t,B),$);const q=w.stridedSlice(d,t,n,s);return K(q,$)},y={x:d},b={begin:t,end:n,strides:s,beginMask:i,endMask:o,ellipsisMask:a,newAxisMask:c,shrinkAxisMask:h};return V.runKernelFunc(m,y,null,Nx,b)}const bb=P({stridedSlice_:k$});function F$(e){const t=W(e,"x","tan"),n={x:t};return V.runKernelFunc((s,i)=>{const o=s.tan(t);return i([t]),o},n,null,ga)}const wb=P({tan_:F$});function _r(e,t,n){if(ao(e),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const s=Ii(e,n);if(s.length!==2&&s.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return vr(e,t,s,n)}function Ua(e,t,n){if(ao(e),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");const s=Ii(e,n);if(s.length!==4&&s.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return vr(e,t,s,n)}function _$(e,t,n){if(ao(e),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");const s=Ii(e,n);if(s.length!==5&&s.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return vr(e,t,s,n)}function W$(e,t,n){if(ao(e),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");const s=Ii(e,n);if(s.length!==6&&s.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||s,vr(e,t,s,n)}function $$(e,t=1,n=!0){const s=W(e,"x","topk");if(s.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");const i=s.shape[s.shape.length-1];if(t>i)throw new Error(`'k' passed to topk() must be <= the last dimension (${i}) but got ${t}`);const o={x:s},a={k:t,sorted:n},[c,h]=V.runKernelFunc(d=>d.topk(s,t,n),o,null,Cx,a);return{values:c,indices:h}}const Lb=P({topk_:$$});function U$(e,t=0,n=1,s,i){if(s!=null&&s==="bool")throw new Error("Unsupported data type $ { dtype }");const o=new lb(t,n,s,!0,i),a=Ze(e,s);for(let c=0;c0,()=>"The input tensor must be at least 1D");const s={x:n},i={axis:t},[o,a]=V.runKernel(Zu,s,i);return{values:o,indices:a}}const qd=P({unique_:B$});function M$(e,t,n){const s=W(e,"x","unsortedSegmentSum"),i=W(t,"segmentIds","unsortedSegmentSum","int32");k(Ut(n),()=>"numSegments must be of dtype 
int");const o={x:s,segmentIds:i},a={numSegments:n},c=(h,d)=>{const m=h.unsortedSegmentSum(s,i,n);return d([i]),m};return V.runKernelFunc(c,o,null,qg,a)}const Sb=P({unsortedSegmentSum_:M$});function P$(e,t=0){const n=W(e,"x","unstack");k(t>=-n.shape.length&&t`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`),t<0&&(t+=n.shape.length);const s={value:n},i={axis:t},o=a=>a.unstack(n,t);return V.runKernelFunc(o,s,null,Hg,i)}const Oi=P({unstack_:P$});function KT(e,t=!0,n,s){return V.makeVariable(e,t,n,s)}function jd(e,t){const n=[];for(let o=0;o0,()=>"mask cannot be scalar"),dt(c.slice(o,o+a),i.shape,"mask's shape must match the first K dimensions of tensor's shape,");let h=1;for(let T=o;T"Shape mismatch in v and x");const h=Ne(1),d=Ce(h,c);let m=X(Ce(a,o),d);if(i){k(s!=null,()=>"When using zeroDebias: true, step is required.");const y=W(s,"step","movingAverage");m=_e(m,Ce(h,ei(c,y)))}return be(o,m)}const cU=P({movingAverage_:aU});function lU(e,t,n){const s=W(e,"indices","scatterND","int32"),i=W(t,"updates","scatterND");Ry(i,s,n);const o=h=>h.scatterND(s,i,n),a={indices:s,updates:i},c={shape:n};return V.runKernelFunc(o,a,null,Ax,c)}const dA=P({scatterND_:lU});function hU(e,t,n,s){if(e.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const i=e.rank>0?e.shape[0]:1,o=e.rank>1?e.shape[1]:1;if(n.length!==o)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${o}.`);const a=t.size;if(!(t.rank===0||t.rank===1&&a===i))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${i}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function uU(e,t,n,s=0){const i=W(e,"sparseIndices","sparseToDense","int32"),o=W(t,"sparseValues","sparseToDense"),a=W(s,"defaultValue","sparseToDense",o.dtype);hU(i,o,n,a);const c={sparseIndices:i,sparseValues:o,defaultValue:a},h={outputShape:n};return V.runKernelFunc(d=>d.sparseToDense(i,o,n,a),c,null,vx,h)}const xb=P({sparseToDense_:uU});function dU(e,t){const n=W(t,"indices","gatherND","int32"),s=W(e,"x","gatherND"),i=a=>a.gatherND(s,n),o={params:s,indices:n};return V.runKernelFunc(i,o,null,dx)}const pA=P({gatherND_:dU});function pU(e,t){if(t==null)return e.shape.slice();if(ot(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s`x has to be a floating point tensor since it's going to be scaled, but got a ${i.dtype} tensor instead.`),k(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return e instanceof Q?i.clone():i;const o=pU(i,n),a=1-t,c=_e(Ra(be(vo(o,0,1,"float32",s),a)),a);return X(i,c)}const mA=P({dropout_:mU});function fA(e){return Math.floor(Math.pow(2,Math.ceil(Math.log(e)/Math.log(2))))}function Tb(e,t,n){const s=1-e%2,i=new Float32Array(e);for(let o=0;o1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${s.rank}`),k(s.rank-1===i.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${s.rank} and targets rank ${i.rank}`),dt(s.shape.slice(0,s.shape.length-1),i.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");const o=s.shape[s.shape.length-1];k(n>0&&n<=o,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${o}), but got ${n}`);const a=await s.data(),c=await 
i.data(),[h,d]=[a.length/o,o],m=bn("bool",h);for(let y=0;yA.value-T.value),m[y]=0;for(let T=0;T`Error in conv2dDerFilter: input must be rank 4, but got shape ${c.shape}.`),k(h.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${h.shape}.`),k(n.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`);const d=o==="NHWC"?c.shape[3]:c.shape[1],m=o==="NHWC"?h.shape[3]:h.shape[1];k(d===n[2],()=>`Error in conv2dDerFilter: depth of input ${d}) must match input depth in filter (${n[2]}.`),k(m===n[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${n[3]}).`),a!=null&&k(Ut(i),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const y=L=>{const T=1,A=Yl(o),N=Ai(c.shape,n,s,T,i,a,!1,A);return L.conv2dDerFilter(c,h,N)},b={x:c,dy:h},w={strides:s,pad:i,dataFormat:o,dimRoundingMode:a};return V.runKernelFunc(y,b,null,tx,w)}const Ab=P({conv2DBackpropFilter_:yU});function Xd(e,t,n){if(n==null||n==="linear")return e;if(n==="relu")return X(e,$a(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function Jd(e,t){let n=t;const s=on(e.shape,t.shape);return s.length>0&&(n=Ue(n,s)),K(n,e.shape)}function Zd(e,t,n){if(t==="linear")return e;if(t==="relu")return Ri(e);if(t==="elu")return So(e);if(t==="relu6")return pb(e);if(t==="prelu")return nh(e,n);throw new Error(`Unknown fused activation ${t}.`)}const Qd=(e,t)=>{const n=e>0;return!n||t==="linear"};function bU({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(h=h||"linear",Qd(V.state.gradientDepth,h)===!1){let _=ji(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),Zd(_,h,d)}const m=W(e,"x","conv2d"),y=W(t,"filter","conv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${y.rank}.`),a!=null&&k(Ut(s),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),k(b.shape[3]===y.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${y.shape[2]}.`),k(rn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),k(i==="NHWC",()=>`Error in conv2d: got dataFormat of ${i} but only NHWC is currently supported.`);const L=Ai(b.shape,y.shape,n,o,s,a);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),tt(L.outShape,T.shape));let A;d!=null&&(A=W(d,"prelu weights","fused conv2d"));const N=(_,B)=>{const[$,H,q,J]=B,re=Xd(_,q,h);k(Rr(o),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${o}'`);const ce=Jy(H.shape,re,$,n,s),ue=Ab(H,re,$.shape,n,s),he=[ce,ue];if(J!=null){const de=Jd(J,re);he.push(de)}return he},E=_=>{const B=_.fusedConv2d({input:b,filter:y,convInfo:L,bias:T,activation:h,preluActivationWeights:A});return B},D={x:b,filter:y,bias:T,preluActivationWeights:A},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const _=Ni((B,$,H)=>{let q=V.runKernelFunc(E,D,null,Xg,F);return H([$,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Ni((B,$,H,q)=>{let J=V.runKernelFunc(E,D,null,Xg,F);return q([$,B,J,H]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const vb=P({fusedConv2d_:bU});function wU(e,t,n,s){let i=e;e.rank===3&&(i=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let o=t;o.rank===3&&(o=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=h=>h.depthwiseConv2DDerFilter(i,o,s),c={x:i,dy:o};return V.runKernelFunc(a,c,null,ox)}const gA=P({depthwiseConv2dNativeBackpropFilter_:wU});function LU(e,t,n,s){let i=t,o=!1;t.rank===3&&(o=!0,i=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const a=d=>d.depthwiseConv2DDerInput(i,n,s),c={dy:i},h=V.runKernelFunc(a,c,null,ax);return o?K(h,[h.shape[1],h.shape[2],h.shape[3]]):h}const yA=P({depthwiseConv2dNativeBackpropInput_:LU});function SU({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(Qd(V.state.gradientDepth,h)===!1){let _=wo(e,t,n,s,i,o,a);return c!=null&&(_=be(_,c)),Zd(_,h,d)}const m=W(e,"x","depthwiseConv2d"),y=W(t,"filter","depthwiseConv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),k(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),k(y.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${y.rank}.`),k(b.shape[3]===y.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${y.shape[2]}.`),o==null&&(o=[1,1]),k(rn(n,o),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),a!=null&&k(Ut(s),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${a} but got pad ${s}.`);const L=Ai(b.shape,y.shape,n,o,s,a,!0);let T;c!=null&&(T=W(c,"bias","fused conv2d"),[T]=Bt(T,m),tt(L.outShape,T.shape));let A;d!=null&&(A=W(d,"prelu weights","fused depthwiseConv2d"));const N=(_,B)=>{k(Rr(o),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`);const[$,H,q,J]=B,re=Xd(_,q,h),ce=yA(H.shape,re,$,L),ue=gA(H,re,$.shape,L);if(J!=null){const he=Jd(T,re);return[ce,ue,he]}return[ce,ue]},E=_=>{const B=_.fusedDepthwiseConv2D({input:b,filter:y,convInfo:L,bias:T,activation:h,preluActivationWeights:A});return B},D={x:b,filter:y,bias:T,preluActivationWeights:A},F={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const _=Ni((B,$,H)=>{let q=V.runKernelFunc(E,D,null,Jg,F);return H([$,B,q]),w&&(q=K(q,[q.shape[1],q.shape[2],q.shape[3]])),{value:q,gradFunc:N}});return _(b,y)}else{const _=Ni((B,$,H,q)=>{let J=V.runKernelFunc(E,D,null,Jg,F);return q([$,B,J,H]),w&&(J=K(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:N}});return _(b,y,T)}}const bA=P({fusedDepthwiseConv2d_:SU});function IU({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:i,activation:o="linear",preluActivationWeights:a}){if(Qd(V.state.gradientDepth,o)===!1){let J=at(e,t,n,s);return i!=null&&(J=be(J,i)),Zd(J,o,a)}let c=W(e,"a","fused matMul"),h=W(t,"b","fused matMul");[c,h]=Bt(c,h);const d=n?c.shape[c.rank-2]:c.shape[c.rank-1],m=s?h.shape[h.rank-1]:h.shape[h.rank-2],y=n?c.shape[c.rank-1]:c.shape[c.rank-2],b=s?h.shape[h.rank-2]:h.shape[h.rank-1],w=c.shape.slice(0,-2),L=h.shape.slice(0,-2),T=we(w),A=we(L);k(c.rank>=2&&h.rank>=2&&c.rank===h.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${c.rank} and ${h.rank}.`),k(ot(w,L),()=>`Error in fused matMul: outer dimensions (${w}) and (${L}) of Tensors with shapes ${c.shape} and ${h.shape} must match.`),k(d===m,()=>`Error in fused matMul: inner shapes (${d}) and (${m}) of Tensors with shapes ${c.shape} and ${h.shape} and transposeA=${n} and transposeB=${s} must match.`);const N=c.shape.slice(0,-2).concat([y,b]),E=n?K(c,[T,d,y]):K(c,[T,y,d]),D=s?K(h,[A,b,m]):K(h,[A,m,b]);let F;i!=null&&(F=W(i,"bias","fused matMul"),[F]=Bt(F,c),tt(N,F.shape));let _;a!=null&&(_=W(a,"prelu weights","fused matMul"));const B=(J,re)=>{const[ce,ue,he,de]=re,le=Xd(K(J,he.shape),he,o);let ye,pe;if(!n&&!s?(ye=at(le,ue,!1,!0),pe=at(ce,le,!0,!1)):!n&&s?(ye=at(le,ue,!1,!1),pe=at(le,ce,!0,!1)):n&&!s?(ye=at(ue,le,!1,!0),pe=at(ce,le,!1,!1)):(ye=at(ue,le,!0,!0),pe=at(le,ce,!0,!0)),i!=null){const Ie=Jd(de,le);return[ye,pe,Ie]}else return[ye,pe]},$=J=>{const re=J.fusedBatchMatMul({a:E,b:D,transposeA:n,transposeB:s,bias:F,activation:o,preluActivationWeights:_});return re},H={a:E,b:D,bias:F,preluActivationWeights:_},q={transposeA:n,transposeB:s,activation:o};if(i==null){const J=Ni((re,ce,ue)=>{const he=V.runKernelFunc($,H,null,Kg,q);return ue([re,ce,he]),{value:K(he,N),gradFunc:B}});return J(E,D)}else{const J=Ni((re,ce,ue,he)=>{const de=V.runKernelFunc($,H,null,Kg,q);return he([re,ce,de,ue]),{value:K(de,N),gradFunc:B}});return J(E,D,F)}}const ep=P({fusedMatMul_:IU});var xU=Object.freeze({__proto__:null,conv2d:vb,depthwiseConv2d:bA,matMul:ep});function TU(e){return Tb(e,.54,.46)}const AU=P({hammingWindow_:TU});function vU(e){return Tb(e,.5,.5)}const wA=P({hannWindow_:vU});function NU(e,t,n,s=!1,i=0){let o=0;const a=[];for(;o+t<=e.size;)a.push(nt(e,o,t)),o+=n;if(s)for(;o`Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`),k(c.rank===2&&c.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${d},4] but had shape ${c.shape}.`),k(h.rank===1&&h.shape[0]===d,()=>`Error in cropAndResize: boxInd must be have size [${d}] but had shape ${c.shape}.`),k(s.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length 
${s.length}.`),k(s[0]>=1&&s[1]>=1,()=>`cropSize must be atleast [1,1], but was ${s}`),k(i==="bilinear"||i==="nearest",()=>`method must be bilinear or nearest, but was ${i}`);const m=L=>L.cropAndResize(a,c,h,s,i,o),y={image:a,boxes:c,boxInd:h},b={method:i,extrapolationValue:o,cropSize:s},w=V.runKernelFunc(m,y,null,ix,b);return w}const EU=P({cropAndResize_:OU});function DU(e){const t=W(e,"image","flipLeftRight","float32");k(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);const n={image:t},s=V.runKernel(Pu,n,{});return s}const kU=P({flipLeftRight_:DU});function FU(e,t,n=0,s=.5){const i=W(e,"image","rotateWithOffset","float32");k(i.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${i.rank}.`);const o={image:i},a={radians:t,fillValue:n,center:s},c=V.runKernel(ed,o,a);return c}const _U=P({rotateWithOffset_:FU});function Ba(e,t,n,s,i,o){s==null&&(s=.5),i==null&&(i=Number.NEGATIVE_INFINITY),o==null&&(o=0);const a=e.shape[0];return n=Math.min(n,a),k(0<=s&&s<=1,()=>`iouThreshold must be in [0, 1], but was '${s}'`),k(e.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`),k(e.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`),k(t.rank===1,()=>"scores must be a 1D tensor"),k(t.shape[0]===a,()=>`scores has incompatible shape with boxes. Expected ${a}, but was ${t.shape[0]}`),k(0<=o&&o<=1,()=>`softNmsSigma must be in [0, 1], but was '${o}'`),{maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o}}function WU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const o=W(e,"boxes","nonMaxSuppression"),a=W(t,"scores","nonMaxSuppression"),c=Ba(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h={maxOutputSize:n,iouThreshold:s,scoreThreshold:i};return V.runKernelFunc(d=>d.nonMaxSuppression(o,a,n,s,i),{boxes:o,scores:a},null,Og,h)}const $U=P({nonMaxSuppression_:WU});function UU(e,t,n){const s=BU(e,t,n),i=s<0?-(s+1):s;e.splice(i,0,t)}function BU(e,t,n){return PU(e,t,n||MU)}function MU(e,t){return e>t?1:e>>1);const c=n(t,e[o]);c>0?s=o+1:(i=o,a=!c)}return a?s:-s-1}function tp(e,t,n,s,i){return Nb(e,t,n,s,i,0).selectedIndices}function np(e,t,n,s,i,o){return Nb(e,t,n,s,i,0,!1,o,!0)}function sp(e,t,n,s,i,o){return Nb(e,t,n,s,i,o,!0)}function Nb(e,t,n,s,i,o,a=!1,c=!1,h=!1){const d=[];for(let A=0;Ai&&d.push({score:t[A],boxIndex:A,suppressBeginIndex:0});d.sort(SA);const m=o>0?-.5/o:0,y=[],b=[];for(;y.length0;){const A=d.pop(),{score:N,boxIndex:E,suppressBeginIndex:D}=A;if(N=D;--_){const B=zU(e,E,y[_]);if(B>=s){F=!0;break}if(A.score=A.score*GU(s,m,B),A.score<=i)break}A.suppressBeginIndex=y.length,F||(A.score===N?(y.push(E),b.push(A.score)):A.score>i&&UU(d,A,SA))}const w=y.length,L=n-w;c&&L>0&&(y.push(...new Array(L).fill(0)),b.push(...new Array(L).fill(0)));const T={selectedIndices:ns(y,"int32")};return a&&(T.selectedScores=ns(b,"float32")),h&&(T.validOutputs=Ne(w,"int32")),T}function zU(e,t,n){const s=e.subarray(t*4,t*4+4),i=e.subarray(n*4,n*4+4),o=Math.min(s[0],s[2]),a=Math.min(s[1],s[3]),c=Math.max(s[0],s[2]),h=Math.max(s[1],s[3]),d=Math.min(i[0],i[2]),m=Math.min(i[1],i[3]),y=Math.max(i[0],i[2]),b=Math.max(i[1],i[3]),w=(c-o)*(h-a),L=(y-d)*(b-m);if(w<=0||L<=0)return 0;const T=Math.max(o,d),A=Math.max(a,m),N=Math.min(c,y),E=Math.min(h,b),D=Math.max(N-T,0)*Math.max(E-A,0);return D/(w+L-D)}function GU(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function SA(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}async function VU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const 
o=W(e,"boxes","nonMaxSuppressionAsync"),a=W(t,"scores","nonMaxSuppressionAsync"),c=Ba(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h=await Promise.all([o.data(),a.data()]),d=h[0],m=h[1],y=tp(d,m,n,s,i);return o!==e&&o.dispose(),a!==t&&a.dispose(),y}const YU=VU;function HU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Ba(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d={boxes:a,scores:c},m={maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o},y=V.runKernel(qu,d,m);return{selectedIndices:y[0],selectedScores:y[1]}}const qU=P({nonMaxSuppressionWithScore_:HU});async function jU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Ba(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d=await Promise.all([a.data(),c.data()]),m=d[0],y=d[1],b=sp(m,y,n,s,i,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),b}const KU=jU;function XU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Ba(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,y=h.scoreThreshold,b={boxes:a,scores:c},w={maxOutputSize:d,iouThreshold:m,scoreThreshold:y,padToMaxOutputSize:o},L=V.runKernel(Hu,b,w);return{selectedIndices:L[0],validOutputs:L[1]}}const JU=P({nonMaxSuppressionPadded_:XU});async function ZU(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Ba(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,y=h.scoreThreshold,[b,w]=await Promise.all([a.data(),c.data()]),L=np(b,w,d,m,y,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),L}const QU=ZU;function eB(e,t,n=!1){const s=W(e,"images","resizeBilinear");k(s.rank===3||s.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h=(b,w)=>(w([i]),b.resizeBilinear(i,a,c,n)),d={images:i},m={alignCorners:n,size:t},y=V.runKernelFunc(h,d,null,Ug,m);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const IA=P({resizeBilinear_:eB});function tB(e,t,n=!1){const s=W(e,"images","resizeNearestNeighbor");k(s.rank===3||s.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${s.rank}.`),k(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),k(s.dtype==="float32"||s.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h={images:i},d={alignCorners:n,size:t},m=(b,w)=>(w([i]),b.resizeNearestNeighbor(i,a,c,n)),y=V.runKernelFunc(m,h,null,$g,d);return o?K(y,[y.shape[1],y.shape[2],y.shape[3]]):y}const xA=P({resizeNearestNeighbor_:tB});function nB(e,t,n){k(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),k(n%1===0,()=>`bandPart(): numUpper must be an integer, got ${n}.`);const s=W(e,"a","bandPart");k(s.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`);const i=s.shape,[o,a]=s.shape.slice(-2);if(!(t<=o))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${o}).`);if(!(n<=a))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the 
number of columns (${a}).`);t<0&&(t=o),n<0&&(n=a);const c=K(sh(0,o,1,"int32"),[-1,1]),h=sh(0,a,1,"int32"),d=Ce(c,h),m=Ws(Dr(d,Ne(+t,"int32")),Ki(d,Ne(-n,"int32"))),y=ct([o,a],s.dtype);return K(is(Oi(K(s,[-1,o,a])).map(b=>_n(m,b,y))),i)}const sB=P({bandPart_:nB});function iB(e){let t;if(Array.isArray(e)){t=!1,k(e!=null&&e.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const i=e[0].shape[0];for(let o=1;o`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[o].shape[0]} vs. ${i})`)}else t=!0,e=ss(e,e.shape[0],0).map(i=>Fr(i,[0]));k(e.length<=e[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`);const n=[],s=e;for(let i=0;i{let o=s[i];if(i>0)for(let a=0;a=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`),e.rank===2)return TA(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce((h,d)=>h*d),s=Oi(K(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),i=[],o=[];s.forEach(h=>{const[d,m]=TA(h,t);i.push(d),o.push(m)});const a=K(is(i,0),e.shape),c=K(is(o,0),e.shape);return[a,c]}}function TA(e,t=!1){return V.tidy(()=>{k(e.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`);const n=e.shape[0],s=e.shape[1];let i=Ed(n),o=Cr(e);const a=_r([[1]],[1,1]);let c=Cr(a);const h=n>=s?s:n;for(let d=0;d{const w=nt(o,[d,d],[n-d,1]),L=Kd(w),T=nt(o,[d,d],[1,1]),A=_n(Ss(T,0),_r([[-1]]),_r([[1]])),N=Ce(T,X(A,L)),E=_e(w,N);E.shape[0]===1?c=Cr(a):c=Mt([a,nt(E,[1,0],[E.shape[0]-1,E.shape[1]])],0);const D=Pt(_e(at(A,N),L)),F=nt(o,[d,0],[n-d,s]),_=X(D,c),B=Me(c);if(d===0)o=Ce(F,at(_,at(B,F)));else{const q=Ce(F,at(_,at(B,F)));o=Mt([nt(o,[0,0],[d,s]),q],0)}const $=Me(_),H=nt(i,[0,d],[n,i.shape[1]-d]);if(d===0)i=Ce(H,at(at(H,c),$));else{const q=Ce(H,at(at(H,c),$));i=Mt([nt(i,[0,0],[n,d]),q],1)}return[c,o,i]}),He([m,y,b])}return!t&&n>s&&(i=nt(i,[0,0],[n,s]),o=nt(o,[0,0],[s,s])),[i,o]})}const aB=P({qr_:oB});(function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(r.Reduction||(r.Reduction={}));function cB(e,t,n=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const s=W(e,"losses","computeWeightedLoss");let i=null;t!=null&&(i=W(t,"weights","computeWeightedLoss"));const o=i==null?s:X(s,i);if(n===r.Reduction.NONE)return o;if(n===r.Reduction.SUM)return Ue(o);if(n===r.Reduction.MEAN){if(i==null)return zt(o);{const a=s.size/i.size,c=_e(Ue(o),Ue(i));return a>1?_e(c,Ne(a)):c}}if(n===r.Reduction.SUM_BY_NONZERO_WEIGHTS){if(i==null)return _e(Ue(o),Ne(s.size));{const a=X(i,Qs(s.shape)),c=ve(Ue(kr(a,Ne(0))),"float32");return _e(Ue(o),c)}}throw Error(`Unknown reduction: ${n}`)}const Xi=P({computeWeightedLoss_:cB});function lB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","absoluteDifference"),o=W(t,"predictions","absoluteDifference");let a=null;n!=null&&(a=W(n,"weights","absoluteDifference")),dt(i.shape,o.shape,"Error in absoluteDifference: ");const c=sn(Ce(i,o));return Xi(c,a,s)}const hB=P({absoluteDifference_:lB});function uB(e,t,n,s,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","cosineDistance"),a=W(t,"predictions","cosineDistance");let c=null;s!=null&&(c=W(s,"weights","cosineDistance")),dt(o.shape,a.shape,"Error in cosineDistance: ");const h=Ne(1),d=Ce(h,Ue(X(o,a),n,!0));return Xi(d,c,i)}const dB=P({cosineDistance_:uB});function pB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let i=W(e,"labels","hingeLoss");const o=W(t,"predictions","hingeLoss");let 
a=null;n!=null&&(a=W(n,"weights","hingeLoss")),dt(i.shape,o.shape,"Error in hingeLoss: ");const c=Ne(1);i=Ce(X(Ne(2),i),c);const h=Ri(Ce(c,X(i,o)));return Xi(h,a,s)}const mB=P({hingeLoss_:pB});function fB(e,t,n,s=1,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","huberLoss"),a=W(t,"predictions","huberLoss");let c=null;n!=null&&(c=W(n,"weights","huberLoss")),dt(o.shape,a.shape,"Error in huberLoss: ");const h=Ne(s),d=sn(Ce(a,o)),m=Io(d,h),y=Ce(d,m),b=be(X(Ne(.5),wt(m)),X(h,y));return Xi(b,c,i)}const gB=P({huberLoss_:fB});function yB(e,t,n,s=1e-7,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","logLoss"),a=W(t,"predictions","logLoss");let c=null;n!=null&&(c=W(n,"weights","logLoss")),dt(o.shape,a.shape,"Error in logLoss: ");const h=Ne(1),d=Ne(s),m=Pt(X(o,ts(be(a,d)))),y=X(Ce(h,o),ts(be(Ce(h,a),d))),b=Ce(m,y);return Xi(b,c,i)}const bB=P({logLoss_:yB});function wB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","meanSquaredError"),o=W(t,"predictions","meanSquaredError");let a=null;n!=null&&(a=W(n,"weights","meanSquaredError")),dt(i.shape,o.shape,"Error in meanSquaredError: ");const c=ah(i,o);return Xi(c,a,s)}const LB=P({meanSquaredError_:wB});function SB(e,t){const n=W(e,"labels","sigmoidCrossEntropyWithLogits"),s=W(t,"logits","sigmoidCrossEntropyWithLogits");dt(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const i=Ri(s),o=X(s,n),a=kd(Ls(Pt(sn(s))));return be(Ce(i,o),a)}function IB(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"multiClassLabels","sigmoidCrossEntropy");const a=W(t,"logits","sigmoidCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","sigmoidCrossEntropy")),dt(o.shape,a.shape,"Error in sigmoidCrossEntropy: "),s>0){const d=Ne(s),m=Ne(1),y=Ne(.5);o=be(X(o,Ce(m,d)),X(y,d))}const h=SB(o,a);return Xi(h,c,i)}const xB=P({sigmoidCrossEntropy_:IB});function TB(e,t,n=-1){if(n===-1&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);const s=Ni((i,o,a)=>{const c=!0,h=ab(o,[n],c),d=Ce(ve(o,"float32"),h);a([i,d]);const m=Pt(X(d,i)),y=Ue(m,[n]),b=(w,L)=>{const[T,A]=L,N=Rn(w.shape,[n]);return[X(K(w,N),Ce(ve(T,"float32"),Ls(A))),X(K(w,N),Ce(Ls(A),ve(T,"float32")))]};return{value:y,gradFunc:b}});return s(e,t)}function AB(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"onehotLabels","softmaxCrossEntropy");const a=W(t,"logits","softmaxCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","softmaxCrossEntropy")),dt(o.shape,a.shape,"Error in softmaxCrossEntropy: "),s>0){const d=Ne(s),m=Ne(1),y=Ne(o.shape[1]);o=be(X(o,Ce(m,d)),_e(d,y))}const h=TB(o,a);return Xi(h,c,i)}const vB=P({softmaxCrossEntropy_:AB});const NB={fft:rh,ifft:Wa,rfft:oh,irfft:Hd},CB={hammingWindow:AU,hannWindow:wA,frame:LA,stft:RU},Wr={flipLeftRight:kU,resizeNearestNeighbor:xA,resizeBilinear:IA,rotateWithOffset:_U,cropAndResize:EU,nonMaxSuppression:$U,nonMaxSuppressionAsync:YU,nonMaxSuppressionWithScore:qU,nonMaxSuppressionWithScoreAsync:KU,nonMaxSuppressionPadded:JU,nonMaxSuppressionPaddedAsync:QU},AA={bandPart:sB,gramSchmidt:rB,qr:aB},RB={absoluteDifference:hB,computeWeightedLoss:Xi,cosineDistance:dB,hingeLoss:mB,huberLoss:gB,logLoss:bB,meanSquaredError:LB,sigmoidCrossEntropy:xB,softmaxCrossEntropy:vB};class Ji extends go{minimize(e,t=!1,n){const{value:s,grads:i}=this.computeGradients(e,n);if(n!=null){const o=n.map(a=>({name:a.name,tensor:i[a.name]}));this.applyGradients(o)}else this.applyGradients(i);return He(i),t?s:(s.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return ob(e,t)}dispose(){this.iterations_!=null&&He(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Ne(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(Ji,Symbol.hasInstance,{value:e=>e.minimize!=null&&e.computeGradients!=null&&e.applyGradients!=null});class lh extends Ji{constructor(e,t,n=null){super();this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],n==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedGrads[s]==null&&(this.accumulatedGrads[s]={originalName:`${n}/accum_grad`,variable:ee(()=>Qe(i).variable(o))}),this.accumulatedUpdates[s]==null&&(this.accumulatedUpdates[s]={originalName:`${n}/accum_var`,variable:ee(()=>Qe(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedGrads[s].variable,h=this.accumulatedUpdates[s].variable;ee(()=>{const d=be(X(c,this.rho),X(wt(a),1-this.rho)),m=X(_e(Ln(be(h,this.epsilon)),Ln(be(c,this.epsilon))),a),y=be(X(h,this.rho),X(wt(m),1-this.rho));c.assign(d),h.assign(y);const b=be(X(m,-this.learningRate),i);i.assign(b)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(He(this.accumulatedGrads.map(e=>e.variable)),He(this.accumulatedUpdates.map(e=>e.variable)))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=e.length/2,n=!1;this.accumulatedGrads=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedUpdates=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}lh.className="Adadelta",me(lh);class hh extends Ji{constructor(e,t=.1){super();this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n];if(this.accumulatedGrads[s]==null){const c=!1;this.accumulatedGrads[s]={originalName:`${n}/accumulator`,variable:ee(()=>Xl(i.shape,this.initialAccumulatorValue).variable(c))}}const o=Array.isArray(e)?e[s].tensor:e[n];if(o==null)return;const a=this.accumulatedGrads[s].variable;ee(()=>{const c=be(a,wt(o));a.assign(c);const h=be(X(_e(o,Ln(be(c,V.backend.epsilon()))),-this.learningRate),i);i.assign(h)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&He(this.accumulatedGrads.map(e=>e.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulatedGrads=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}hh.className="Adagrad",me(hh);class uh extends Ji{constructor(e,t,n,s=null){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],ee(()=>{this.accBeta1=Ne(t).variable(),this.accBeta2=Ne(n).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=Ce(1,this.accBeta2);t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:ee(()=>Qe(a).variable(c))}),this.accumulatedSecondMoment[o]==null&&(this.accumulatedSecondMoment[o]={originalName:`${i}/v`,variable:ee(()=>Qe(a).variable(c))});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedSecondMoment[o].variable,y=be(X(d,this.beta1),X(h,1-this.beta1)),b=be(X(m,this.beta2),X(wt(h),1-this.beta2)),w=_e(y,n),L=_e(b,s);d.assign(y),m.assign(b);const T=be(X(_e(w,be(Ln(L),this.epsilon)),-this.learningRate),a);a.assign(T)}),this.accBeta1.assign(X(this.accBeta1,this.beta1)),this.accBeta2.assign(X(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedSecondMoment!=null&&He(this.accumulatedSecondMoment.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await 
this.extractIterations(e),ee(()=>{this.accBeta1.assign(ei(this.beta1,this.iterations_+1)),this.accBeta2.assign(ei(this.beta2,this.iterations_+1))});const t=e.length/2,n=!1;this.accumulatedFirstMoment=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedSecondMoment=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}uh.className="Adam",me(uh);class dh extends Ji{constructor(e,t,n,s=null,i=0){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=i,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],ee(()=>{this.iteration=Ne(0).variable(),this.accBeta1=Ne(t).variable()}),s==null&&(this.epsilon=V.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);ee(()=>{const n=Ce(1,this.accBeta1),s=_e(-this.learningRate,be(X(this.iteration,this.decay),1));t.forEach((i,o)=>{const a=V.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:Qe(a).variable(c)}),this.accumulatedWeightedInfNorm[o]==null&&(this.accumulatedWeightedInfNorm[o]={originalName:`${i}/v`,variable:Qe(a).variable(c)});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedWeightedInfNorm[o].variable,y=be(X(d,this.beta1),X(h,1-this.beta1)),b=X(m,this.beta2),w=sn(h),L=_s(b,w);d.assign(y),m.assign(L);const T=be(X(_e(s,n),_e(y,be(L,this.epsilon))),a);a.assign(T)}),this.iteration.assign(be(this.iteration,1)),this.accBeta1.assign(X(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedWeightedInfNorm!=null&&He(this.accumulatedWeightedInfNorm.map(e=>e.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}dh.className="Adamax",me(dh);class Ma extends Ji{constructor(e){super();this.learningRate=e,this.setLearningRate(e)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=Array.isArray(e)?e[s].tensor:e[n];if(i==null)return;const o=V.registeredVariables[n];ee(()=>{const a=be(X(this.c,i),o);o.assign(a)})}),this.incrementIterations()}setLearningRate(e){this.learningRate=e,this.c!=null&&this.c.dispose(),this.c=Nn(Ne(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(e=await this.extractIterations(e),e.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Ma.className="SGD",me(Ma);class ph extends Ma{constructor(e,t,n=!1){super(e);this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Ne(this.momentum)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const 
i=V.registeredVariables[n];if(this.accumulations[s]==null){const c=!1;this.accumulations[s]={originalName:`${n}/momentum`,variable:ee(()=>Qe(i).variable(c))}}const o=this.accumulations[s].variable,a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;ee(()=>{let c;const h=be(X(this.m,o),a);this.useNesterov?c=be(X(this.c,be(a,X(h,this.m))),i):c=be(X(this.c,h),i),o.assign(h),i.assign(c)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&He(this.accumulations.map(e=>e.variable))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulations=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}ph.className="Momentum",me(ph);class mh extends Ji{constructor(e,t=.9,n=0,s=null,i=!1){super();if(this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=i,s==null&&(this.epsilon=V.backend.epsilon()),e==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=V.registeredVariables[n],o=!1;this.accumulatedMeanSquares[s]==null&&(this.accumulatedMeanSquares[s]={originalName:`${n}/rms`,variable:ee(()=>Qe(i).variable(o))}),this.accumulatedMoments[s]==null&&(this.accumulatedMoments[s]={originalName:`${n}/momentum`,variable:ee(()=>Qe(i).variable(o))}),this.accumulatedMeanGrads[s]==null&&this.centered&&(this.accumulatedMeanGrads[s]={originalName:`${n}/mg`,variable:ee(()=>Qe(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedMeanSquares[s].variable,h=this.accumulatedMoments[s].variable;ee(()=>{const d=be(X(c,this.decay),X(wt(a),1-this.decay));if(this.centered){const m=this.accumulatedMeanGrads[s].variable,y=be(X(m,this.decay),X(a,1-this.decay)),b=_e(X(a,this.learningRate),Ln(Ce(d,be(wt(y),this.epsilon)))),w=be(X(h,this.momentum),b);c.assign(d),m.assign(y),h.assign(w);const L=Ce(i,w);i.assign(L)}else{const m=be(X(c,this.decay),X(wt(a),1-this.decay)),y=be(X(h,this.momentum),_e(X(a,this.learningRate),Ln(be(m,this.epsilon))));c.assign(m),h.assign(y);const b=Ce(i,y);i.assign(b)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&He(this.accumulatedMeanSquares.map(e=>e.variable)),this.accumulatedMeanGrads!=null&&this.centered&&He(this.accumulatedMeanGrads.map(e=>e.variable)),this.accumulatedMoments!=null&&He(this.accumulatedMoments.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const 
t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedMoments=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.centered&&(this.accumulatedMeanGrads=e.slice(t*2,t*3).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}mh.className="RMSProp",me(mh);class Co{static sgd(e){return new Ma(e)}static momentum(e,t,n=!1){return new ph(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,i=!1){return new mh(e,t,n,s,i)}static adam(e=.001,t=.9,n=.999,s=null){return new uh(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new lh(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,i=0){return new dh(e,t,n,s,i)}static adagrad(e,t=.1){return new hh(e,t)}}const Ro={sgd:Co.sgd,momentum:Co.momentum,adadelta:Co.adadelta,adagrad:Co.adagrad,rmsprop:Co.rmsprop,adamax:Co.adamax,adam:Co.adam};const OB=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:e=>e())();function ip(){return new Promise(e=>OB(()=>e()))}function Cb(e,t,n){const s=n*(typeof e=="number"?e:e[0]),i=t*(typeof e=="number"?e:e[1]);return[s,i]}function fh(e,t,n,s=!0){let i=[];if(s)i=i.concat(t.slice(0)),i.push(e[0]/n),i=i.concat(e.slice(1));else{i=i.concat(e[0]);const o=t.length;for(let a=0;a=t*2+1||a%2===1?o.push(a):i.push(a);s.push(...i),s.push(0),s.push(...o)}return s}function yh(e,t,n,s=!0){const i=[];s?i.push(e[0]/n):i.push(e[0]*n);for(let o=1;o{const a=[...i];a[n]=o;const c=nt(e,s,a);return s[n]+=o,c})}function Pb(e,t){const n=new Array(e.rank);for(let i=0;iD.value-E.value);const T=y*s,A=h.subarray(T,T+s),N=d.subarray(T,T+s);for(let E=0;E{const[n]=t;return{x:()=>X(e,$a(ve(n,"float32"),-1))}}};const FB={kernelName:fe,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=wt(ve(n,"float32")),i=Ln(Ce(Ne(1),s));return Pt(_e(e,i))}}}};const _B={kernelName:Ae,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Ln(Ce(wt(ve(n,"float32")),1));return _e(e,s)}}}};const WB={kernelName:Te,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{let c=e;const h=on(n.shape,i);return h.length>0&&(c=Ue(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=on(s.shape,i);return h.length>0&&(c=Ue(c,h)),K(c,s.shape)};return{a:o,b:a}}};const $B={kernelName:Ve,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach((s,i)=>{n[i]=()=>e.clone()}),n}};const UB={kernelName:$t,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Qe(n)}}};const BB={kernelName:Kt,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Qe(n)}}};const MB={kernelName:Dn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Ln(Ce(Ne(1),wt(ve(n,"float32")))))}}};const PB={kernelName:Tn,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Ln(be(Ne(1),wt(ve(n,"float32"))));return _e(e,s)}}}};const zB={kernelName:Li,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{const c=be(wt(n),wt(s));let h=X(e,_e(s,c));const d=on(n.shape,i);return d.length>0&&(h=Ue(h,d)),K(h,n.shape)},a=()=>{const c=be(wt(n),wt(s));let h=Pt(X(e,_e(n,c)));const d=on(s.shape,i);return d.length>0&&(h=Ue(h,d)),K(h,s.shape)};return{a:o,b:a}}};const 
GB={kernelName:An,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(wt(ve(n,"float32")),1))}}};const VB={kernelName:Ks,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Ce(Ne(1),wt(ve(n,"float32"))))}}};function YB(e,t,n,s,i=[1,1,1],o,a){const c=W(e,"dy","avgPool3dBackprop"),h=W(t,"input","avgPool3dBackprop");let d=c,m=h,y=!1;h.rank===4&&(y=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]]),m=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]])),k(d.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${d.rank}.`),k(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),k(rn(s,i),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${s} and dilations '${i}'`),a!=null&&k(Ut(o),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const b=A=>{const N=Gl(m.shape,n,s,i,o,a);return A.avgPool3dBackprop(d,m,N)},w={dy:d,input:m},L={filterSize:n,strides:s,dilations:i,pad:o,dimRoundingMode:a},T=V.runKernelFunc(b,w,null,ex,L);return y?K(T,[T.shape[1],T.shape[2],T.shape[3],T.shape[4]]):T}const HB=P({avgPool3dBackprop_:YB});const qB={kernelName:Xc,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,dilations:a,pad:c,dimRoundingMode:h}=n,d=a==null?[1,1,1]:a;return{x:()=>HB(e,s,i,o,d,c,h)}}};function jB(e,t,n,s,i){const o=W(e,"dy","avgPoolBackprop"),a=W(t,"input","avgPoolBackprop");k(a.rank===o.rank,()=>`Rank of input (${a.rank}) does not match rank of dy (${o.rank})`);let c=a,h=o,d=!1;a.rank===3&&(d=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),h=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),k(h.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${h.rank}.`),k(c.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${c.rank}.`);const m=L=>{const T=Fn(c.shape,n,s,1,i);return L.avgPoolBackprop(h,c,T)},y={dy:h,input:c},b={filterSize:n,strides:s,pad:i},w=V.runKernelFunc(m,y,null,ua,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3]]):w}const KB=P({avgPoolBackprop_:jB});const XB={kernelName:Xs,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,pad:a}=n;return{x:()=>KB(e,s,i,o,a)}}};const JB={kernelName:ag,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,i]=t,{transposeA:o,transposeB:a}=n;return!o&&!a?{a:()=>at(e,i,!1,!0),b:()=>at(s,e,!0,!1)}:!o&&a?{a:()=>at(e,i,!1,!1),b:()=>at(e,s,!0,!1)}:o&&!a?{a:()=>at(i,e,!1,!0),b:()=>at(s,e,!1,!1)}:{a:()=>at(i,e,!0,!0),b:()=>at(e,s,!0,!0)}}};const ZB={kernelName:cg,gradFunc:(e,t,n)=>{const{blockShape:s,crops:i}=n;return{x:()=>th(e,s,i)}}};const QB={kernelName:lg,gradFunc:(e,t,n)=>{const s=n,i=s.inputShape,o=s.shape,a=Array.from(o);for(let h=i.length-1;h>=0;h--)if(i[h]===o[h])a[h]=1;else if(i[h]!==1)throw new Error(`broadcastTo(): [${i}] cannot be broadcast to [${o}].`);const c=[];for(let h=0;h1&&c.push(h);return{x:()=>Ue(e,c,!0)}}};const eM={kernelName:Jc,gradFunc:e=>({x:()=>e.clone()})};const tM={kernelName:Zc,gradFunc:e=>({x:()=>Qe(e)})};const nM={kernelName:Qc,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:i,clipValueMax:o}=n;return{x:()=>_n(Ws(Ki(s,i),Dr(s,o)),e,Qe(e))}}};const sM={kernelName:$u,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map(h=>h.shape),{axis:i}=n,o=ft(i,t[0].shape)[0],a=s.map(h=>h[o]),c=ss(e,a,o);return c.map(h=>()=>h)}};const iM={kernelName:ug,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{dilations:o,strides:a,pad:c,dataFormat:h}=n;return k(Rr(o),()=>`Error 
in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${o}'`),{x:()=>Jy(s.shape,e,i,a,c,h),filter:()=>Ab(s,e,i.shape,a,c,h)}}};const rM={kernelName:dg,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{strides:o,pad:a,dataFormat:c,dimRoundingMode:h}=n;return{dy:()=>ji(e,i,o,a,c,1,h),filter:()=>Ab(e,s,i.shape,o,a,c,h)}}};function oM(e,t,n,s,i){let o=e;e.rank===4&&(o=K(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let a=t;a.rank===4&&(a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),k(o.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${o.shape}.`),k(a.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${a.shape}.`),k(n.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`),k(o.shape[4]===n[3],()=>`Error in conv3dDerFilter: depth of input ${o.shape[4]}) must match input depth in filter (${n[3]}.`),k(a.shape[4]===n[4],()=>`Error in conv3dDerFilter: depth of dy (${a.shape[4]}) must match output depth for filter (${n[4]}).`);const c=m=>{const y=1,b=Vl(o.shape,n,s,y,i);return m.conv3dDerFilter(o,a,b)},h={x:o,y:a},d={strides:s,pad:i};return V.runKernelFunc(c,h,null,nx,d)}const aM=P({conv3DBackpropFilter_:oM});const cM={kernelName:pg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o}=n;k(Rr(s),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);const[a,c]=t;return{x:()=>_T(a.shape,e,c,i,o),filter:()=>aM(a,e,c.shape,i,o)}}};const lM={kernelName:da,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Pt(zd(ve(n,"float32"))),e)}}};const hM={kernelName:el,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Gd(ve(n,"float32")),e)}}};const uM={kernelName:mg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:i,exclusive:o,reverse:a}=n;return{x:()=>{const c=kn([i],s.rank);let h=Od(e,i,o,!a);return c!=null&&(h=Me(h,c)),h}}}};const dM={kernelName:fg,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o,dimRoundingMode:a}=n,c=s==null?[1,1]:s;k(Rr(c),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${c}'`);const[h,d]=t;k(h.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${h.rank}.`),k(d.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${d.rank}.`),k(h.shape[3]===d.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${h.shape[3]}) must match the inChannels dimension in filter ${d.shape[2]}.`),k(rn(i,c),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'.`),a!=null&&k(Ut(o),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=Ai(h.shape,d.shape,i,c,o,a,!0);return{x:()=>yA(h.shape,e,d,m),filter:()=>gA(h,e,d.shape,m)}}};const pM={kernelName:Uu,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,o={x:s,filter:i,dy:e},a={x:s,filter:i,dy:e};return{x:()=>V.runKernel(Bu,o,n),filter:()=>V.runKernel(Mu,a,n)}}};const mM={kernelName:pa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),h=on(n.shape,i);return h.length>0?K(Ue(c,h),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const h=on(s.shape,i);h.length>0&&(c=K(Ue(c,h),s.shape));const d=wt(s);return Pt(_e(c,ve(d,"float32")))};return{a:o,b:a}}};const fM={kernelName:tl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s=o=>o.eluDer(e,n),i={dy:e,y:n};return{x:()=>V.runKernelFunc(s,i,null,lx)}}};const gM={kernelName:nl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Ls(Pt(wt(n))),2/Math.sqrt(Math.PI));return{x:()=>X(e,s)}}};const yM={kernelName:sl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,n)}}};const bM={kernelName:il,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ls(n))}}};const wM={kernelName:rl,gradFunc:e=>({x:()=>Qe(e)})};const LM={kernelName:yg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{const c=_e(e,ve(s,"float32")),h=on(n.shape,i);return h.length>0?K(Ue(c,h),n.shape):c},a=()=>{let c=X(e,ve(n,"float32"));const h=on(s.shape,i);h.length>0&&(c=K(Ue(c,h),s.shape));const d=wt(s);return Pt(_e(c,ve(d,"float32")))};return{a:o,b:a}}};const SM={kernelName:ol,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[i,o,a,c]=t,h=c==null?Ne(1):c,d=on(o.shape,i.shape),m=[];if(o.rank===1){for(let F=0;Fo.rank===1?K(X(X(e,Er(K(w,[1,1,1,o.shape[0]]),m)),h),i.shape):K(X(X(e,w),h),i.shape),A=()=>{let F=X(X(w,Ne(-1)),b);return o.rank===1&&(F=Ue(F,d)),K(F,o.shape)},N=()=>{let F=X(X(L,y),b);return o.rank===1&&(F=Ue(F,d)),K(F,o.shape)},E=()=>{const F=X(y,w);let _=X(e,F);return o.rank===1&&(_=Ue(_,d)),K(_,o.shape)},D=()=>{let F=e;return o.rank===1&&(F=Ue(F,d)),K(F,o.shape)};return{x:T,mean:A,variance:N,scale:E,offset:D}}};const IM={kernelName:bg,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,i]=t,{axis:o}=n,a=ft(o,s.shape)[0],c=()=>{const h=s.shape,d=i.size,m=h.slice(0,a),y=m.length,b=h.slice(o,h.length).slice(1),w=b.length,L=FA(0,y),T=FA(y+1,y+1+w),A=_A([m,[d],b]),N=K(e,A),E=K(i,[d]),D=_A([[y],L,T]),F=Me(N,D);let _=Sb(F,E,s.shape[a]);const B=Ml(D);return _=Me(_,B),_};return{x:c,indices:()=>i}}};function FA(e,t){const n=[];for(let s=e;s{const[n,s]=t;return{a:()=>Qe(n),b:()=>Qe(s)}}};const TM={kernelName:al,gradFunc:e=>({x:()=>ve(e,"float32")})};const AM={kernelName:cl,gradFunc:e=>({x:()=>Qe(e)})};const vM={kernelName:ll,gradFunc:e=>({x:()=>Qe(e)})};const NM={kernelName:hl,gradFunc:e=>({x:()=>Qe(e)})};const CM={kernelName:dl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,be(n,1))}}};const RM={kernelName:ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,ve(n,"float32"))}}};const OM={kernelName:Ig,inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:i}=n;return{logits:()=>{const o=!0,a=Ls(s);return Ce(e,X(Ue(e,i,o),a))}}}};function EM(e,t,n,s=5,i=1,o=1,a=.5){const c=m=>m.LRNGrad(n,e,t,s,i,o,a),h={x:e,y:t,dy:n},d={depthRadius:s,bias:i,alpha:o,beta:a};return 
V.runKernelFunc(c,h,null,wx,d)}const DM=P({localResponseNormalizationBackprop_:EM});const kM={kernelName:xg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{depthRadius:o,bias:a,alpha:c,beta:h}=n;return{x:()=>DM(s,i,e,o,a,c,h)}}};function WA(e,t,n,s,i){return t.rank{const o=X(e,ve(Zs(n,t),e.dtype));return i==null?o:Me(o,i)}}}const $A={kernelName:pl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:i}=s,[o,a]=t,c=ft(i,o.shape),h=kn(c,o.rank),d=WA(e,a,o,c,h);return{x:()=>{let m=d.x();return h!=null&&(m=Me(m)),m}}}};const FM={kernelName:Tg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(Ki(n,s),"float32")),o=()=>X(e,ve(Zl(n,s),"float32"));return{a:i,b:o}}};function _M(e,t,n,s,i,o=[1,1,1],a,c){const h=W(e,"dy","maxPool3dBackprop"),d=W(t,"input","maxPool3dBackprop"),m=W(n,"output","maxPool3dBackprop");let y=h,b=d,w=m,L=!1;d.rank===4&&(L=!0,y=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]]),b=K(d,[1,d.shape[0],d.shape[1],d.shape[2],d.shape[3]]),w=K(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),k(y.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${y.rank}.`),k(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),k(w.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${w.rank}.`),k(rn(i,o),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${i} and dilations '${o}'`),c!=null&&k(Ut(a),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${a}.`);const T=D=>{const F=Gl(b.shape,s,i,o,a,c);return D.maxPool3dBackprop(y,b,w,F)},A={dy:y,input:b,output:w},N={filterSize:s,strides:i,dilations:o,pad:a,dimRoundingMode:c},E=V.runKernelFunc(T,A,null,Lx,N);return L?K(E,[E.shape[1],E.shape[2],E.shape[3],E.shape[4]]):E}const WM=P({maxPool3dBackprop_:_M});const $M={kernelName:Ag,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,dilations:c,pad:h,dimRoundingMode:d}=n,m=c==null?[1,1,1]:c;return{x:()=>WM(e,s,i,o,a,m,h,d)}}};function UM(e,t,n,s,i,o,a){const c=W(e,"dy","maxPoolBackprop"),h=W(t,"input","maxPoolBackprop"),d=W(n,"output","maxPoolBackprop");k(h.rank===c.rank,()=>`Rank of input (${h.rank}) does not match rank of dy (${c.rank})`),k(c.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${c.rank}.`),k(h.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${h.rank}.`),a!=null&&k(Ut(o),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=w=>{const L=Fn(h.shape,s,i,1,o,a);return w.maxPoolBackprop(c,h,d,L)},y={dy:c,input:h,output:d},b={filterSize:s,strides:i,pad:o,dimRoundingMode:a};return V.runKernelFunc(m,y,null,Gu,b)}const BM=P({maxPoolBackprop_:UM});const MM={kernelName:ml,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,pad:c}=n;return{x:()=>BM(e,s,i,o,a,c)}}};const PM={kernelName:vg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:i}=s,[o,a]=t,c=ft(i,o.shape),h=kn(c,o.rank),d=WA(e,a,o,c,h);return{x:()=>{let m=d.x();return h!=null&&(m=Me(m)),m}}}};const zM={kernelName:Ng,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,ve(Dr(n,s),"float32")),o=()=>X(e,ve(Ss(n,s),"float32"));return{a:i,b:o}}};const GM={kernelName:Cg,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{const c=on(n.shape,i);return 
c.length>0?K(Ue(e,c),n.shape):e},a=()=>{const c=X(e,Pt(Ra(_e(n,s)))),h=on(s.shape,i);return h.length>0?K(Ue(c,h),s.shape):c};return{a:o,b:a}}};const VM={kernelName:fl,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{const c=X(e,ve(s,"float32")),h=on(n.shape,i);return h.length>0?K(Ue(c,h),n.shape):c},a=()=>{const c=X(e,ve(n,"float32")),h=on(s.shape,i);return h.length>0?K(Ue(c,h),s.shape):c};return{a:o,b:a}}};const YM={kernelName:Rg,gradFunc:e=>({x:()=>Pt(e)})};const HM={kernelName:Dg,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>ct(n.shape,"float32")}}};const qM={kernelName:Eg,gradFunc:e=>({x:()=>Qe(e)})};const UA={kernelName:ju,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>nt(e,o,s.shape)}}};const jM={kernelName:kg,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,i]=t,o=n,a=s,c=tt(o.shape,a.shape),h=()=>{const m=ve(a,"float32");let y=X(e,X(m,ei(o,Ce(m,Ne(1)))));const b=on(o.shape,c);return b.length>0&&(y=Ue(y,b)),K(y,o.shape)},d=()=>{const m=Ss(o,0),y=_n(m,ts(o),Qe(o));let b=X(e,X(i,y));const w=on(a.shape,c);return w.length>0&&(b=Ue(b,w)),K(b,a.shape)};return{a:h,b:d}}};const KM={kernelName:Fg,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,i=Ss(n,0);return{x:()=>_n(i,e,X(e,s)),alpha:()=>{let o=_n(i,Qe(e),X(e,n));const a=on(s.shape,e.shape);return a.length>0&&(o=Ue(o,a)),K(o,s.shape)}}}};const XM={kernelName:gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,Pt(wt(n)))}}};const JM={kernelName:Bg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Dr(n,6),$a(n));return{x:()=>X(e,ve(s,"float32"))}}};const ZM={kernelName:Wg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,ve($a(n),"float32"))}}};const QM={kernelName:yl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>K(e,n.shape)}}};const eP={kernelName:Ug,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeBilinearBackprop(e,s,h)},o={images:s},a=()=>V.runKernelFunc(i,o,null,Tx,n);return{images:a}}};const tP={kernelName:$g,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeNearestNeighborBackprop(e,s,h)},o={images:s},a=()=>V.runKernelFunc(i,o,null,xx,n);return{images:a}}};const nP={kernelName:Mg,gradFunc:(e,t,n)=>{const{dims:s}=n,i=ft(s,e.shape);return{x:()=>Is(e,i)}}};const sP={kernelName:bl,gradFunc:e=>({x:()=>Qe(e)})};const iP={kernelName:wl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Pt(_e(e,X(ei(n,1.5),2)))}}};const rP={kernelName:Pg,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>ve(Qe(n),"float32"),t:()=>X(e,ve(n,e.dtype)),e:()=>X(e,ve(Ql(n),e.dtype))}}};const oP={kernelName:Ll,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Ss(n,Ne(0)),i=Ne(rp),o=Ne(op),a=X(e,o),c=X(X(e,i),Ls(ve(n,"float32")));return _n(s,a,c)}}}};const aP={kernelName:xl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(n,Ce(Ne(1),n)))}}};const cP={kernelName:Il,gradFunc:e=>({x:()=>Qe(e)})};const lP={kernelName:ma,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Kl(ve(n,"float32")),e)}}};const hP={kernelName:Sl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Rd(ve(n,"float32")),e)}}};const uP={kernelName:Ku,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:i,size:o}=n,a=s.shape,[c,h]=wd(s,i,o),d=[];for(let m=0;mCi(e,d)}}};const 
dP={kernelName:Vg,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:i}=n,o=!0,a=X(e,s);return{logits:()=>Ce(a,X(Ue(a,[i],o),s))}}};const pP={kernelName:Tl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,vi(n))}}};const BA={kernelName:Xu,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:i}=n;return{x:()=>ql(e,s,i)}}};const MA={kernelName:Gg,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>Mt(e,s)}}};const mP={kernelName:Al,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,X(Ln(ve(n,"float32")),2))}}};const fP={kernelName:Ju,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(ve(n,"float32"),2))}}};const gP={kernelName:fa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=Ne(2),o=()=>X(e,X(i,Ce(n,s))),a=()=>X(e,X(i,Ce(s,n)));return{a:o,b:a}}};const yP={kernelName:Rl,gradFunc:e=>({x:()=>Qe(e)})};const bP={kernelName:vl,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=tt(n.shape,s.shape),o=()=>{let c=e;const h=on(n.shape,i);return h.length>0&&(c=Ue(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=on(s.shape,i);return h.length>0&&(c=Ue(c,h)),K(Pt(c),s.shape)};return{a:o,b:a}}};const wP={kernelName:zg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,i=s.shape.slice(),{axis:o}=n,a=ft(o,s.shape);a.forEach(d=>{i[d]=1});const c=K(e,i),h=X(c,Qs(s.shape,"float32"));return{x:()=>h}}};const LP={kernelName:ga,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>_e(e,wt(Kl(n)))}}};const SP={kernelName:Nl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Ce(Ne(1),wt(n)),e)}}};const IP={kernelName:Yg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:i}=n,o=()=>{let a=Qe(s);if(s.rank===1)for(let c=0;c{const s=n,{perm:i}=s,o=Ml(i);return{x:()=>Me(e,o)}}};const TP={kernelName:Hg,gradFunc:(e,t,n)=>{const s=n,{axis:i}=s;return{value:()=>is(e,i)}}};const AP={kernelName:qg,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t,s=()=>vP(e,n);return{x:s}}};function vP(e,t){const n=_s(t,Qe(t)),s=Oa(e,n);let i=Ki(t,Ne(0,"int32"));const o=s.rank-i.rank;for(let c=0;c({x:()=>Qe(e)})};const CP=[kB,FB,_B,WB,$B,UB,BB,MB,PB,zB,GB,VB,qB,XB,JB,ZB,QB,eM,tM,nM,sM,rM,iM,cM,lM,hM,uM,dM,pM,mM,fM,gM,yM,bM,LM,wM,SM,IM,xM,TM,AM,vM,NM,CM,RM,OM,kM,$A,$A,FM,$M,MM,PM,zM,GM,VM,YM,HM,qM,UA,UA,jM,KM,XM,JM,ZM,QM,eP,tP,nP,sP,iP,rP,oP,aP,cP,lP,hP,uP,dP,pP,BA,BA,MA,MA,mP,gP,fP,yP,bP,wP,LP,SP,IP,xP,TP,AP,NP];for(const e of CP)Rx(e);Q.prototype.abs=function(){return this.throwIfDisposed(),sn(this)};Q.prototype.acos=function(){return this.throwIfDisposed(),$y(this)};Q.prototype.acosh=function(){return this.throwIfDisposed(),Uy(this)};Q.prototype.addStrict=function(e){return this.throwIfDisposed(),nA(this,e)};Q.prototype.add=function(e){return this.throwIfDisposed(),be(this,e)};Q.prototype.all=function(e,t){return this.throwIfDisposed(),xd(this,e,t)};Q.prototype.any=function(e,t){return this.throwIfDisposed(),Pl(this,e,t)};Q.prototype.argMax=function(e){return this.throwIfDisposed(),zl(this,e)};Q.prototype.argMin=function(e){return this.throwIfDisposed(),My(this,e)};Q.prototype.asScalar=function(){return this.throwIfDisposed(),k(this.size===1,()=>"The array must have only 1 element."),K(this,[])};Q.prototype.asType=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.as2D=function(e,t){return this.throwIfDisposed(),K(this,[e,t])};Q.prototype.as3D=function(e,t,n){return this.throwIfDisposed(),K(this,[e,t,n])};Q.prototype.as4D=function(e,t,n,s){return 
this.throwIfDisposed(),K(this,[e,t,n,s])};Q.prototype.as5D=function(e,t,n,s,i){return this.throwIfDisposed(),K(this,[e,t,n,s,i])};Q.prototype.asin=function(){return this.throwIfDisposed(),Py(this)};Q.prototype.asinh=function(){return this.throwIfDisposed(),zy(this)};Q.prototype.atan=function(){return this.throwIfDisposed(),Gy(this)};Q.prototype.atan2=function(e){return this.throwIfDisposed(),Vy(this,e)};Q.prototype.atanh=function(){return this.throwIfDisposed(),Yy(this)};Q.prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),Hl(this,e,t,n,s)};Q.prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),ql(this,e,t)};Q.prototype.batchNorm=function(e,t,n,s,i){return this.throwIfDisposed(),bo(this,e,t,n,s,i)};Q.prototype.broadcastTo=function(e){return this.throwIfDisposed(),jl(this,e)};Q.prototype.cast=function(e){return this.throwIfDisposed(),ve(this,e)};Q.prototype.ceil=function(){return this.throwIfDisposed(),Xy(this)};Q.prototype.clipByValue=function(e,t){return this.throwIfDisposed(),Yn(this,e,t)};Q.prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof Q&&(e=[e]),Mt([this,...e],t)};Q.prototype.conv1d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Nd(this,e,t,n,s,i,o)};Q.prototype.conv2dTranspose=function(e,t,n,s,i){return this.throwIfDisposed(),Cd(this,e,t,n,s,i)};Q.prototype.conv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),ji(this,e,t,n,s,i,o)};Q.prototype.cos=function(){return this.throwIfDisposed(),Kl(this)};Q.prototype.cosh=function(){return this.throwIfDisposed(),Rd(this)};Q.prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),Od(this,e,t,n)};Q.prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),Qy(this,e,t)};Q.prototype.depthwiseConv2D=function(e,t,n,s,i,o){return nn("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),wo(this,e,t,n,s,i,o)};Q.prototype.depthwiseConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),wo(this,e,t,n,s,i,o)};Q.prototype.dilation2d=function(e,t,n,s,i){return this.throwIfDisposed(),eb(this,e,t,n,s,i)};Q.prototype.divNoNan=function(e){return this.throwIfDisposed(),tb(this,e)};Q.prototype.divStrict=function(e){return this.throwIfDisposed(),sA(this,e)};Q.prototype.div=function(e){return this.throwIfDisposed(),_e(this,e)};Q.prototype.dot=function(e){return this.throwIfDisposed(),WT(this,e)};Q.prototype.elu=function(){return this.throwIfDisposed(),So(this)};Q.prototype.equalStrict=function(e){return this.throwIfDisposed(),XT(this,e)};Q.prototype.equal=function(e){return this.throwIfDisposed(),Zs(this,e)};Q.prototype.erf=function(){return this.throwIfDisposed(),nb(this)};Q.prototype.exp=function(){return this.throwIfDisposed(),Ls(this)};Q.prototype.expandDims=function(e){return this.throwIfDisposed(),Hn(this,e)};Q.prototype.expm1=function(){return this.throwIfDisposed(),sb(this)};Q.prototype.fft=function(){return this.throwIfDisposed(),rh(this)};Q.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};Q.prototype.floor=function(){return this.throwIfDisposed(),Ra(this)};Q.prototype.floorDiv=function(e){return this.throwIfDisposed(),Id(this,e)};Q.prototype.gather=function(e,t){return this.throwIfDisposed(),Oa(this,e,t)};Q.prototype.greaterEqualStrict=function(e){return this.throwIfDisposed(),JT(this,e)};Q.prototype.greaterEqual=function(e){return this.throwIfDisposed(),Ki(this,e)};Q.prototype.greaterStrict=function(e){return this.throwIfDisposed(),ZT(this,e)};Q.prototype.greater=function(e){return 
this.throwIfDisposed(),Ss(this,e)};Q.prototype.ifft=function(){return this.throwIfDisposed(),Wa(this)};Q.prototype.irfft=function(){return this.throwIfDisposed(),Hd(this)};Q.prototype.isFinite=function(){return this.throwIfDisposed(),UT(this)};Q.prototype.isInf=function(){return this.throwIfDisposed(),BT(this)};Q.prototype.isNaN=function(){return this.throwIfDisposed(),MT(this)};Q.prototype.leakyRelu=function(e){return this.throwIfDisposed(),Dd(this,e)};Q.prototype.lessEqualStrict=function(e){return this.throwIfDisposed(),QT(this,e)};Q.prototype.lessEqual=function(e){return this.throwIfDisposed(),Dr(this,e)};Q.prototype.lessStrict=function(e){return this.throwIfDisposed(),eA(this,e)};Q.prototype.less=function(e){return this.throwIfDisposed(),Zl(this,e)};Q.prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),rb(this,e,t,n,s)};Q.prototype.logSigmoid=function(){return this.throwIfDisposed(),zT(this)};Q.prototype.logSoftmax=function(e){return this.throwIfDisposed(),_d(this,e)};Q.prototype.logSumExp=function(e,t){return this.throwIfDisposed(),ab(this,e,t)};Q.prototype.log=function(){return this.throwIfDisposed(),ts(this)};Q.prototype.log1p=function(){return this.throwIfDisposed(),kd(this)};Q.prototype.logicalAnd=function(e){return this.throwIfDisposed(),Ws(this,e)};Q.prototype.logicalNot=function(){return this.throwIfDisposed(),Ql(this)};Q.prototype.logicalOr=function(e){return this.throwIfDisposed(),Wd(this,e)};Q.prototype.logicalXor=function(e){return this.throwIfDisposed(),GT(this,e)};Q.prototype.matMul=function(e,t,n){return this.throwIfDisposed(),at(this,e,t,n)};Q.prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),eh(this,e,t,n,s)};Q.prototype.max=function(e,t){return this.throwIfDisposed(),qn(this,e,t)};Q.prototype.maximumStrict=function(e){return this.throwIfDisposed(),iA(this,e)};Q.prototype.maximum=function(e){return this.throwIfDisposed(),_s(this,e)};Q.prototype.mean=function(e,t){return this.throwIfDisposed(),zt(this,e,t)};Q.prototype.min=function(e,t){return this.throwIfDisposed(),ka(this,e,t)};Q.prototype.minimumStrict=function(e){return this.throwIfDisposed(),rA(this,e)};Q.prototype.minimum=function(e){return this.throwIfDisposed(),Io(this,e)};Q.prototype.modStrict=function(e){return this.throwIfDisposed(),oA(this,e)};Q.prototype.mod=function(e){return this.throwIfDisposed(),$d(this,e)};Q.prototype.mulStrict=function(e){return this.throwIfDisposed(),aA(this,e)};Q.prototype.mul=function(e){return this.throwIfDisposed(),X(this,e)};Q.prototype.neg=function(){return this.throwIfDisposed(),Pt(this)};Q.prototype.norm=function(e,t,n){return this.throwIfDisposed(),Kd(this,e,t,n)};Q.prototype.notEqualStrict=function(e){return this.throwIfDisposed(),tA(this,e)};Q.prototype.notEqual=function(e){return this.throwIfDisposed(),kr(this,e)};Q.prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),fo(this,e,t,n)};Q.prototype.onesLike=function(){return this.throwIfDisposed(),On(this)};Q.prototype.pad=function(e,t){return this.throwIfDisposed(),Ci(this,e,t)};Q.prototype.pool=function(e,t,n,s,i){return this.throwIfDisposed(),HT(this,e,t,n,s,i)};Q.prototype.powStrict=function(e){return this.throwIfDisposed(),cA(this,e)};Q.prototype.pow=function(e){return this.throwIfDisposed(),ei(this,e)};Q.prototype.prelu=function(e){return this.throwIfDisposed(),nh(this,e)};Q.prototype.prod=function(e,t){return this.throwIfDisposed(),Bd(this,e,t)};Q.prototype.reciprocal=function(){return this.throwIfDisposed(),db(this)};Q.prototype.relu=function(){return 
this.throwIfDisposed(),Ri(this)};Q.prototype.relu6=function(){return this.throwIfDisposed(),pb(this)};Q.prototype.reshapeAs=function(e){return this.throwIfDisposed(),K(this,e.shape)};Q.prototype.reshape=function(e){return this.throwIfDisposed(),K(this,e)};Q.prototype.resizeBilinear=function(e,t){return this.throwIfDisposed(),IA(this,e,t)};Q.prototype.resizeNearestNeighbor=function(e,t){return this.throwIfDisposed(),xA(this,e,t)};Q.prototype.reverse=function(e){return this.throwIfDisposed(),Is(this,e)};Q.prototype.rfft=function(){return this.throwIfDisposed(),oh(this)};Q.prototype.round=function(){return this.throwIfDisposed(),mb(this)};Q.prototype.rsqrt=function(){return this.throwIfDisposed(),Md(this)};Q.prototype.selu=function(){return this.throwIfDisposed(),Pd(this)};Q.prototype.separableConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),fb(this,e,t,n,s,i,o)};Q.prototype.sigmoid=function(){return this.throwIfDisposed(),vi(this)};Q.prototype.sign=function(){return this.throwIfDisposed(),gb(this)};Q.prototype.sin=function(){return this.throwIfDisposed(),zd(this)};Q.prototype.sinh=function(){return this.throwIfDisposed(),Gd(this)};Q.prototype.slice=function(e,t){return this.throwIfDisposed(),nt(this,e,t)};Q.prototype.softmax=function(e){return this.throwIfDisposed(),No(this,e)};Q.prototype.softplus=function(){return this.throwIfDisposed(),Da(this)};Q.prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),th(this,e,t)};Q.prototype.split=function(e,t){return this.throwIfDisposed(),ss(this,e,t)};Q.prototype.sqrt=function(){return this.throwIfDisposed(),Ln(this)};Q.prototype.square=function(){return this.throwIfDisposed(),wt(this)};Q.prototype.squaredDifference=function(e){return this.throwIfDisposed(),ah(this,e)};Q.prototype.squaredDifferenceStrict=function(e){return this.throwIfDisposed(),lA(this,e)};Q.prototype.squeeze=function(e){return this.throwIfDisposed(),Fr(this,e)};Q.prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof Q?[this,e]:[this,...e];return is(n,t)};Q.prototype.step=function(e){return this.throwIfDisposed(),$a(this,e)};Q.prototype.stridedSlice=function(e,t,n,s,i,o,a,c){return this.throwIfDisposed(),bb(this,e,t,n,s,i,o,a,c)};Q.prototype.subStrict=function(e){return this.throwIfDisposed(),hA(this,e)};Q.prototype.sub=function(e){return this.throwIfDisposed(),Ce(this,e)};Q.prototype.sum=function(e,t){return this.throwIfDisposed(),Ue(this,e,t)};Q.prototype.tan=function(){return this.throwIfDisposed(),wb(this)};Q.prototype.tanh=function(){return this.throwIfDisposed(),Ca(this)};Q.prototype.tile=function(e){return this.throwIfDisposed(),Er(this,e)};Q.prototype.toBool=function(){return this.throwIfDisposed(),ve(this,"bool")};Q.prototype.toFloat=function(){return this.throwIfDisposed(),ve(this,"float32")};Q.prototype.toInt=function(){return this.throwIfDisposed(),ve(this,"int32")};Q.prototype.topk=function(e,t){return this.throwIfDisposed(),Lb(this,e,t)};Q.prototype.transpose=function(e){return this.throwIfDisposed(),Me(this,e)};Q.prototype.unique=function(e){return this.throwIfDisposed(),qd(this,e)};Q.prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Sb(this,e,t)};Q.prototype.unstack=function(e){return this.throwIfDisposed(),Oi(this,e)};Q.prototype.where=function(e,t){return this.throwIfDisposed(),_n(e,this,t)};Q.prototype.zerosLike=function(){return this.throwIfDisposed(),Qe(this)};let ap;function an(){return ap==null&&(ap=AT().epsilon()),ap}function GZ(e){ap=e}function ti(){return"channelsLast"}class Qi extends 
Error{constructor(e){super(e);Object.setPrototypeOf(this,Qi.prototype)}}class ni extends Error{constructor(e){super(e);Object.setPrototypeOf(this,ni.prototype)}}class j extends Error{constructor(e){super(e);Object.setPrototypeOf(this,j.prototype)}}class ze extends Error{constructor(e){super(e);Object.setPrototypeOf(this,ze.prototype)}}class PA extends Error{constructor(e){super(e);Object.setPrototypeOf(this,PA.prototype)}}class RP extends Error{constructor(e){super(e);Object.setPrototypeOf(this,RP.prototype)}}function Oo(e,t){if(Array.isArray(e)){let n=[];for(let s=0;sn.toUpperCase())}let $s={};function Gb(e){if(e==null)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function Vb(e){if(e==null||typeof e!="object")return;if(Array.isArray(e))e.forEach(t=>Vb(t));else{const t=Object.keys(e);for(const n of t){const s=e[n];s!=null&&typeof s=="object"&&(!Array.isArray(s)&&s.type==="ndarray"&&typeof s.value=="number"?e[n]=s.value:Vb(s))}}}function bh(e,t={},n={},s="object",i=!1){if(typeof e=="string"){const o=e;let a;if(o in n)a=n[o];else if(o in $s)a=$s[o];else if(a=t[o],a==null)throw new j(`Unknown ${s}: ${e}. This may be due to one of the following reasons: + ${s.shape}`),A(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${s.shape}`);const c=m=>m.depthToSpace(s,t,n),h={x:s},d={blockSize:t,dataFormat:n};return G.runKernelFunc(c,h,null,kx,d)}const Sb=z({depthToSpace_:$_});function U_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","depthwiseConv2d"),h=W(t,"filter","depthwiseConv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),A(d.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${d.rank}.`),A(h.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${h.rank}.`),A(d.shape[3]===h.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${d.shape[3]}) must match the inChannels dimension in filter ${h.shape[2]}.`),a!=null&&A(Le(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const f=(x,v)=>{o==null&&(o=[1,1]),A(cn(n,o),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${o}'`);const N=kn(d.shape,h.shape,n,o,s,a,!0),O=x.depthwiseConv2D(d,h,N);return v([d,h]),O},b={x:d,filter:h},w={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},L=G.runKernelFunc(f,b,null,ld,w);return m?K(L,[L.shape[1],L.shape[2],L.shape[3]]):L}const Co=z({depthwiseConv2d_:U_});function B_(e){const t=W(e,"x","diag"),n=i=>{const o=K(t,[t.size]),a=i.diag(o),c=[...e.shape,...e.shape];return K(a,c)},s={x:t};return G.runKernelFunc(n,s,null,Fx)}const M_=z({diag_:B_});function P_(e,t,n,s,i=[1,1],o="NHWC"){const a=W(e,"x","dilation2d"),c=W(t,"filter","dilation2d");A(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),A(c.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${c.rank}.`),A(o==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${o}`);let h=a,d=!1;a.rank===3&&(h=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),d=!0);const m={x:h,filter:c},f={strides:n,pad:s,dilations:i},b=G.runKernel(hd,m,f);return d?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const Ib=z({dilation2d_:P_});function Ro(e,t){const n=e.length,s=[];for(let i=0;i1&&a===1&&s.unshift(o)}return s}function pn(e,t){const n=[];for(let s=0;s1)&&n.unshift(o)}return n}function nt(e,t){const n=[],s=Math.max(e.length,t.length);for(let i=0;ia.equal(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Wx)}const Xs=z({equal_:z_});function V_(e,t,n){const s=W(t,"a","where"),i=W(n,"b","where"),o=W(e,"condition","where","bool"),a=nt(s.shape,i.shape),c=lh(s,a),h=lh(i,a);o.rank===1&&A(o.shape[0]===s.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),o.rank!==1&&B(o.shape,h.shape,"Error in where: ");const d=(f,b)=>{const w=f.select(o,c,h);return b([o]),w},m={condition:o,t:c,e:h};return G.runKernelFunc(d,m,null,Iy)}const Bn=z({where_:V_});function G_(e){const t=W(e,"x","zerosLike"),n={x:t};return G.runKernelFunc(s=>s.zerosLike(t),n,null,Ry)}const et=z({zerosLike_:G_});function Y_(e,t){let n=W(e,"a","div"),s=W(t,"b","div");[n,s]=Gt(n,s);const i=We(n,s),o=et(i),a=Xs(s,o);return Bn(a,o,i)}const xb=z({divNoNan_:Y_});function H_(e,t){const n=W(e,"t1","dot"),s=W(t,"t2","dot");A((n.rank===1||n.rank===2)&&(s.rank===1||s.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`);const i=n.rank===1?n.size:n.shape[1],o=s.rank===1?s.size:s.shape[0];if(A(i===o,()=>`Error in dot: inner dimensions of inputs must match, but got ${i} and ${o}.`),n.rank===1&&s.rank===1){const a=K(n,[1,-1]),c=K(s,[-1,1]),h=ct(a,c);return K(h,[])}else if(n.rank===1&&s.rank===2){const a=K(n,[1,-1]),c=K(s,[s.shape[0],s.shape[1]]),h=ct(a,c);return K(h,[h.size])}else if(n.rank===2&&s.rank===1){const a=K(s,[-1,1]),c=ct(n,a);return K(c,[c.size])}else{const a=K(s,[s.shape[0],s.shape[1]]),c=ct(n,a);return c}}const tA=z({dot_:H_});function q_(e){const t=W(e,"x","elu"),n=(i,o)=>{const a=i.elu(t);return o([a]),a},s={x:t};return G.runKernelFunc(n,s,null,yl)}const Ua=z({elu_:q_});function j_(e){let t=W(e,"x","erf");A(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=Ae(t,"float32"));const n={x:t};return G.runKernelFunc((s,i)=>{const o=s.erf(t);return i([t]),o},n,null,bl)}const Tb=z({erf_:j_});function K_(e){const t=W(e,"x","exp"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.exp(t);return i([o]),o},n,null,wl)}const Is=z({exp_:K_});function X_(e,t=0){const n=null,s=W(e,"x","expandDims",n);A(t<=s.rank,()=>"Axis must be <= rank 
of the tensor");const i=s.shape.slice();return t<0&&(A(-(s.rank+1)<=t,()=>`Axis must be in the interval [${-(s.rank+1)}, ${s.rank}]`),t=s.rank+t+1),i.splice(t,0,1),K(s,i)}const Zn=z({expandDims_:X_});function J_(e){const t=W(e,"x","expm1"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.expm1(t);return i([t]),o},n,null,Ll)}const Ab=z({expm1_:J_});function Z_(e,t){const n=null,s=W(e,"x","tile",n);A(s.rank===t.length,()=>`Error in transpose: rank of input ${s.rank} must match length of reps ${t}.`);const i=(h,d)=>{const m=h.tile(s,t);return d([s]),m},o=[s],a={x:s},c={reps:t};return G.runKernelFunc(i,a,null,vy,c,o)}const $r=z({tile_:Z_});function Q_(e,t,n,s="float32"){t==null&&(t=e);const i=wt([e,t],s),o=e<=t?e:t;for(let c=0;ci.fill(e,t,n),{},null,ny,s)}function eW(e){const t=W(e,"x","floor"),n={x:t};return G.runKernelFunc(s=>s.floor(t),n,null,Sl)}const Ma=z({floor_:eW});const vb=30;function uh(e){return e<=vb?e:ed(e,Math.floor(Math.sqrt(e)))}function tW(e,t){let n=!1,s;for(e<=vb?(s=e,n=!0):s=ed(e,Math.floor(Math.sqrt(e)));!n;)s>t||s===e?n=!0:s=ed(e,s+1);return s}function nW(e,t,n){const s=[],i=e.length;for(let o=0;o{const m=qe(n,s.shape)[0],f=nA(s,i,m),b=h.gather(s,K(i,[i.size]),m);return d([s,i]),K(b,f.outputShape)};return G.runKernelFunc(c,o,null,iy,a)}const Pa=z({gather_:iW});function rW(e,t){let n=W(e,"a","greater"),s=W(t,"b","greater");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.greater(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Ux)}const xs=z({greater_:rW});function oW(e,t){let n=W(e,"a","greaterEqual"),s=W(t,"b","greaterEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.greaterEqual(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,ry)}const Zi=z({greaterEqual_:oW});function aW(e){const t=W(e,"input","imag"),n=i=>i.imag(t),s={input:t};return G.runKernelFunc(n,s,null,gd)}const dh=z({imag_:aW});function cW(e){const t=W(e,"x","isFinite"),n={x:t};return G.runKernelFunc(s=>s.isFinite(t),n,null,Tl)}const sA=z({isFinite_:cW});function lW(e){const t=W(e,"x","isInf"),n={x:t};return G.runKernelFunc(s=>s.isInf(t),n,null,Al)}const iA=z({isInf_:lW});function hW(e){const t=W(e,"x","isNaN"),n={x:t};return G.runKernelFunc(s=>s.isNaN(t),n,null,vl)}const rA=z({isNaN_:hW});function uW(e,t){let n=W(e,"a","maximum"),s=W(t,"b","maximum");[n,s]=Gt(n,s),n.dtype==="bool"&&(n=Ae(n,"int32"),s=Ae(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.maximum(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,cy)}const $s=z({maximum_:uW});function Ce(e,t){if((hn(e)&&t!=="string"||Array.isArray(e))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&hn(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const n=[],s=[];return Er(e,n,s,t)}function dW(e,t=.2){const n=W(e,"x","leakyRelu");return $s(X(Ce(t),n),n)}const lp=z({leakyRelu_:dW});function pW(e,t){let n=W(e,"a","less"),s=W(t,"b","less");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.less(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Bx)}const ph=z({less_:pW});function mW(e,t){let n=W(e,"a","lessEqual"),s=W(t,"b","lessEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.lessEqual(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Mx)}const Ur=z({lessEqual_:mW});function oA(e,t,n){if(n<=0)throw new Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return 
G.runKernelFunc(i=>i.linspace(e,t,n),{},null,Px,s)}function fW(e,t=5,n=1,s=1,i=.5){const o=W(e,"x","localResponseNormalization");A(o.rank===4||o.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got + rank ${o.rank}.`),A(Le(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let a=o,c=!1;o.rank===3&&(c=!0,a=K(o,[1,o.shape[0],o.shape[1],o.shape[2]]));const h=(b,w)=>{const L=b.localResponseNormalization4D(a,t,n,s,i);return w([a,L]),L},d={x:a},m={depthRadius:t,bias:n,alpha:s,beta:i},f=G.runKernelFunc(h,d,null,ay,m);return c?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const Nb=z({localResponseNormalization_:fW});function gW(e){const t=W(e,"x","log"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.log(t);return i([t]),o},n,null,Nl)}const cs=z({log_:gW});function yW(e){const t=W(e,"x","log1p"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.log1p(t);return i([t]),o},n,null,Cl)}const hp=z({log1p_:yW});function bW(e){return A(Rr(e),()=>"The f passed in grad(f) must be a function"),(t,n)=>{const s=W(t,"x","tf.grad",null),i=n!=null?W(n,"dy","tf.grad"):null;return G.tidy(()=>{const{value:o,grads:a}=G.gradients(()=>e(s),[s],i);return i!=null&&B(o.shape,i.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),up(a),a[0]})}}function wW(e){return A(Rr(e),()=>"The f passed in grads(f) must be a function"),(t,n)=>{A(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");const s=th(t,"args","tf.grads",null),i=n!=null?W(n,"dy","tf.grads"):null;return G.tidy(()=>{const{value:o,grads:a}=G.gradients(()=>e(...s),s,i);return i!=null&&B(o.shape,i.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),up(a),a})}}function LW(e){return A(Rr(e),()=>"The f passed in valueAndGrad(f) must be a function"),(t,n)=>{A(t instanceof ee,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),A(n==null||n instanceof ee,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");const{grads:s,value:i}=G.gradients(()=>e(t),[t],n);return up(s),{grad:s[0],value:i}}}function SW(e){return A(Rr(e),()=>"The f passed in valueAndGrads(f) must be a function"),(t,n)=>{A(Array.isArray(t)&&t.every(i=>i instanceof ee),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),A(n==null||n instanceof ee,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");const s=G.gradients(()=>e(...t),t,n);return n!=null&&B(s.value.shape,n.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),up(s.grads),s}}function Cb(e,t){A(Rr(e),()=>"The f passed in variableGrads(f) must be a function"),A(t==null||Array.isArray(t)&&t.every(d=>d instanceof Ql),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");const n=t!=null;if(!n){t=[];for(const d in G.registeredVariables)t.push(G.registeredVariables[d])}const s=n?t.filter(d=>!d.trainable):null,i=t.length;t=t.filter(d=>d.trainable),A(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${i} variables is trainable.`);const o=!0,{value:a,grads:c}=G.gradients(e,t,null,o);A(c.some(d=>d!=null),()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). 
Please make sure the operations that use variables are inside the function f passed to minimize()."),A(a.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);const h={};return t.forEach((d,m)=>{c[m]!=null&&(h[d.name]=c[m])}),s!=null&&s.forEach(d=>h[d.name]=null),{value:a,grads:h}}function Ai(e){return G.customGrad(e)}function up(e){const t=e.filter(n=>n==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that + the f you passed encloses all operations that lead from x to y.`)}function IW(e){const t=W(e,"x","neg"),n={x:t};return G.runKernelFunc(s=>s.neg(t),n,null,my)}const Ht=z({neg_:IW});function xW(e){const t=W(e,"x","softplus"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.softplus(t);return i([t]),o},n,null,Vl)}const za=z({softplus_:xW});function TW(e){const t=W(e,"x","logSigmoid"),n=Ai(s=>{const i=Ht(za(Ht(s))),o=a=>{const c=X(a,Ti(Ht(s)));return c};return{value:i,gradFunc:o}});return n(t)}const aA=z({logSigmoid_:TW});function AW(e,t=null,n=!1){const s=W(e,"x","max"),i=(c,h)=>{const d=qe(t,s.shape);let m=d;const f=Xn(m,s.rank);let b=s;f!=null&&(b=Ye(s,f),m=as(m.length,b.rank));const w=c.max(b,m);f!=null&&b.dispose();let L=w;if(n){const x=vn(L.shape,qe(t,s.shape));L=K(L,x),w.dispose()}return h([s,L]),L},o={x:s},a={reductionIndices:t,keepDims:n};return G.runKernelFunc(i,o,null,Rl,a)}const Qn=z({max_:AW});function vW(e,t){let n=W(e,"a","sub"),s=W(t,"b","sub");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.subtract(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Na)}const Re=z({sub_:vW});function NW(e,t=null,n=!1){let s=W(e,"x","sum");s.dtype==="bool"&&(s=Ae(s,"int32"));const i=(c,h)=>{h([s]);const d=qe(t,s.shape),m=Xn(d,s.rank);let f=d,b=s;m!=null&&(b=Ye(s,m),f=as(f.length,s.rank));let w=c.sum(b,f);if(n){const L=vn(w.shape,d);w=K(w,L)}return w},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,xy,a)}const $e=z({sum_:NW});function CW(e,t=-1){const n=W(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and axis was ${t}`);const s=(a,c)=>{const h=!0,d=Qn(e,t,!0),m=Re(e,d),f=Re(Ae(m,"float32"),cs($e(Is(m),t,h)));return c([f]),f},i={logits:n},o={axis:t};return G.runKernelFunc(s,i,null,oy,o)}const dp=z({logSoftmax_:CW});function RW(e,t=null,n=!1){const s=W(e,"x","logSumExp"),i=qe(t,s.shape),o=Qn(s,i,!0),a=Re(s,o),c=Is(a),h=$e(c,i),d=cs(h),m=be(K(o,d.shape),d);if(n){const f=vn(m.shape,i);return K(m,f)}return m}const Rb=z({logSumExp_:RW});function OW(e,t){const n=W(e,"a","logicalAnd","bool"),s=W(t,"b","logicalAnd","bool");nt(n.shape,s.shape);const i={a:n,b:s};return G.runKernelFunc(o=>o.logicalAnd(n,s),i,null,zx)}const Us=z({logicalAnd_:OW});function EW(e){const t=W(e,"x","logicalNot","bool"),n={x:t};return G.runKernelFunc(s=>s.logicalNot(t),n,null,yd)}const mh=z({logicalNot_:EW});function DW(e,t){const n=W(e,"a","logicalOr","bool"),s=W(t,"b","logicalOr","bool");nt(n.shape,s.shape);const i={a:n,b:s};return G.runKernelFunc(o=>o.logicalOr(n,s),i,null,Vx)}const pp=z({logicalOr_:DW});function kW(e,t){const n=W(e,"a","logicalXor","bool"),s=W(t,"b","logicalXor","bool");return nt(n.shape,s.shape),Us(pp(e,t),mh(Us(e,t)))}const cA=z({logicalXor_:kW});function FW(e,t,n,s,i){const o=W(e,"x","maxPool"),a=1;let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(c.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${c.rank}.`),A(cn(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const x=Un(c.shape,t,n,1,s,i);let v;return x.filterWidth===1&&x.filterHeight===1&&ae(x.inShape,x.outShape)?v=c.clone():v=w.maxPool(c,x),L([c,v]),v},m={x:c},f={filterSize:t,strides:n,pad:s,dimRoundingMode:i},b=G.runKernelFunc(d,m,null,Ol,f);return h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const fh=z({maxPool_:FW});function _W(e,t=[1,1,1],n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:un("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","maxPool3d");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),A(h.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${h.rank}.`),A(o==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),A(cn(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,x)=>{a==null&&(a=[1,1,1]);const v=oh(h.shape,t,n,a,s,i,o),N=L.maxPool3d(h,v);return x([h,N]),N},f={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a},w=G.runKernelFunc(m,f,null,ly,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Ob=z({maxPool3d_:_W});function WW(e,t,n,s,i=!1){const o=W(e,"x","maxPoolWithArgmax"),a={x:o},c={filterSize:t,strides:n,pad:s,includeBatchInIndex:i},h=G.runKernel(wd,a,c);return{result:h[0],indexes:h[1]}}const lA=z({maxPoolWithArgmax_:WW});function dt(e,t="float32"){if(t==="complex64"){const s=dt(e,"float32"),i=dt(e,"float32");return ji(s,i)}const n=La(P(e),t);return G.makeTensor(n,e,t)}function Js(e,t="float32"){if(t==="complex64"){const s=Js(e,"float32"),i=dt(e,"float32");return ji(s,i)}const n=Mg(P(e),t);return G.makeTensor(n,e,t)}function $W(e,t=null,n=!1){const s=W(e,"x","mean"),i=qe(t,s.shape),o=An(s.shape,i),a=o[1],c=P(a),h={x:s},d={axis:t,keepDims:n},m=()=>{const b=Ce(c),w=b.dtype===s.dtype?s:Ae(s,b.dtype),L=We(w,b);return $e(L,t,n)},f=Ai(b=>{const w=G.runKernelFunc(m,h,null,hy,d),L=x=>{const v=b.shape.slice();i.forEach(E=>{v[E]=1});const N=K(x,v),O=We(X(N,Js(b.shape,"float32")),c);return O};return{value:w,gradFunc:L}});return f(s)}const qt=z({mean_:$W});function UW(e,t=null,n=!1){const s=W(e,"x","min"),i=(c,h)=>{const d=qe(t,s.shape);let m=d;const f=Xn(m,s.rank);let b=s;f!=null&&(b=Ye(s,f),m=as(m.length,s.rank));const w=c.min(b,m);f!=null&&b.dispose();let L=w;if(n){const x=vn(L.shape,d);L=K(w,x),w.dispose()}return h([s,L]),L},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,uy,a)}const Va=z({min_:UW});function BW(e,t){let n=W(e,"a","minimum"),s=W(t,"b","minimum");[n,s]=Gt(n,s),n.dtype==="bool"&&(n=Ae(n,"int32"),s=Ae(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.minimum(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,dy)}const Oo=z({minimum_:BW});function MW(e,t,n){A(n==="reflect"||n==="symmetric",()=>`Invalid mode. Mode must be either reflect or symmetric. Got ${n}.`);const s=W(e,"x","mirrorPad");if(s.rank===0)throw new Error("mirrorPad(scalar) is not defined. Pass non-scalar to mirrorPad");A(t.length===s.rank,()=>`Padding doesn't match input. Must be ${s.rank}. Got ${t.length}.`);const i=n==="reflect"?1:0;for(let c=0;c"Invalid number of paddings. 
Must be length of 2 each."),A(t[c][0]>=0&&t[c][0]<=s.shape[c]-i&&t[c][1]>=0&&t[c][1]<=s.shape[c]-i,()=>`Padding in dimension ${c} cannot be greater than or equal to ${s.shape[c]-i} or less than 0 for input of shape ${s.shape}`);const o={paddings:t,mode:n},a={x:s};return G.runKernel(El,a,o)}const Eb=z({mirrorPad_:MW});function PW(e,t){let n=W(e,"a","mod"),s=W(t,"b","mod");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.mod(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,py)}const mp=z({mod_:PW});function zW(e){const t=W(e,"x","square"),n={},s=[t],i=[];return G.runKernelFunc((o,a)=>(a([t]),o.square(t)),{x:t},null,"Square",n,s,i)}const At=z({square_:zW});function VW(e,t=null,n=!1){e=W(e,"x","moments");const s=qe(t,e.shape),i=qt(e,s,n);let o=i.shape;n||(o=vn(i.shape,s));const a=At(Re(Ae(e,"float32"),K(i,o))),c=qt(a,s,n);return{mean:i,variance:c}}const fp=z({moments_:VW});function GW(e,t,n,s){const i=W(t,"data","multiRNNCell"),o=th(n,"c","multiRNNCell"),a=th(s,"h","multiRNNCell");let c=i;const h=[];for(let f=0;f2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);n=n||Math.random();const c=a===1?K(i,[1,-1]):i,h=G.runKernelFunc(d=>d.multinomial(c,s,t,n),{logits2D:c});return a===1?K(h,[h.size]):h}const hA=z({multinomial_:HW});function qW(e,t){let n=W(e,"a","notEqual"),s=W(t,"b","notEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.notEqual(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Dl)}const Br=z({notEqual_:qW});function jW(e){const t=W(e,"input","real"),n=i=>i.real(t),s={input:t};return G.runKernelFunc(n,s,null,Td)}const Ga=z({real_:jW});function KW(e){const t=W(e,"x","onesLike"),n=(i,o)=>{if(t.dtype==="complex64"){const a=Fn(Ga(t)),c=et(dh(t));return ji(a,c)}return i.onesLike(t)},s={x:t};return G.runKernelFunc(n,s,null,gy)}const Fn=z({onesLike_:KW});function XW(e,t){const n=W(e,"v1","outerProduct"),s=W(t,"v2","outerProduct");A(n.rank===1&&s.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${n.rank} and ${s.rank}.`);const i=K(n,[-1,1]),o=K(s,[1,-1]);return ct(i,o)}const JW=z({outerProduct_:XW});function ZW(e,t,n=0){const s=W(e,"x","pad");if(s.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const i=(c,h)=>(h([s]),c.pad(s,t,n)),o={paddings:t,constantValue:n},a={x:s};return G.runKernelFunc(i,a,null,Id,o)}const vi=z({pad_:ZW});function QW(e,t,n=0){return A(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),vi(e,[t],n)}const e$=z({pad1d_:QW});function t$(e,t,n=0){return A(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),vi(e,t,n)}const n$=z({pad2d_:t$});function s$(e,t,n=0){return A(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),vi(e,t,n)}const i$=z({pad3d_:s$});function r$(e,t,n=0){return A(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. 
Must be length of 2 each."),vi(e,t,n)}const o$=z({pad4d_:r$});function a$(e,t,n){const s=W(e,"x","spaceToBatchND");A(s.rank>=1+t.length,()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`),A(n.length===t.length,()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`),A(s.shape.reduce((c,h,d)=>d>0&&d<=t.length?c&&(h+n[d-1][0]+n[d-1][1])%t[d-1]===0:c,!0),()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`);const i=c=>c.spaceToBatchND(s,t,n),o={x:s},a={blockShape:t,paddings:n};return G.runKernelFunc(i,o,null,vd,a)}const gh=z({spaceToBatchND_:a$});function c$(e,t,n,s,i,o){i==null&&(i=[1,1]),o==null&&(o=1),s===0&&(s="valid");const a=W(e,"x","maxPool");let c=a,h=!1;a.rank===3&&(h=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]])),A(cn(o,i),()=>`Error in pool: Either strides or dilations must be 1. Got strides ${o} and dilations '${i}'`);const d=Un(c.shape,t,o,i,s),m=[d.dilationHeight,d.dilationWidth];let f;s==="same"?f=h$([d.filterHeight,d.filterWidth],m):f=[[0,0],[0,0]];const b=m[0]===1&&m[1]===1,[w,L]=l$([d.inHeight,d.inWidth],m,f),x=b?s:"valid",v=b?c:gh(c,m,w),N=n==="avg"?()=>ah(v,t,o,x):()=>fh(v,t,o,x),O=N(),E=b?O:ch(O,m,L);return h?K(E,[E.shape[1],E.shape[2],E.shape[3]]):E}function l$(e,t,n){const s=n.map(m=>m[0]),i=n.map(m=>m[1]),o=e.concat(s,i),a=t.map((m,f)=>(m-o[f]%m)%m),c=i.map((m,f)=>m+a[f]),h=t.map((m,f)=>[s[f],c[f]]),d=t.map((m,f)=>[0,a[f]]);return[h,d]}function h$(e,t){const n=e.map((a,c)=>a+(a-1)*(t[c]-1)),s=n.map(a=>a-1),i=s.map(a=>Math.floor(a/2)),o=s.map((a,c)=>a-i[c]);return s.map((a,c)=>[i[c],o[c]])}const uA=z({pool_:c$});function u$(e,t){let n=W(e,"base","pow"),s=W(t,"exp","pow");[n,s]=Gt(n,s);const i={a:n,b:s},o=(a,c)=>{const h=a.pow(n,s);return c([n,s,h]),h};return G.runKernelFunc(o,i,null,by)}const Zs=z({pow_:u$});function d$(e,t){const n=W(e,"x","prelu"),s=W(t,"alpha","prelu"),i=(a,c)=>{const h=a.prelu(n,s);return c([n,s]),h},o={x:n,alpha:s};return G.runKernelFunc(i,o,null,xd)}const yh=z({prelu_:d$});function p$(e,t=null,n=!1){let s=W(e,"x","prod");s.dtype==="bool"&&(s=Ae(s,"int32"));const i=c=>{const h=qe(t,s.shape),d=Xn(h,s.rank);let m=h,f=s;d!=null&&(f=Ye(s,d),m=as(m.length,s.rank));let b=c.prod(f,m);if(n){const w=vn(b.shape,h);b=K(b,w)}return b},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Hx,a)}const gp=z({prod_:p$});function m$(e,t,n){const s=P(e);let i=null;if(n==null||n==="float32")i=new Float32Array(s);else if(n==="int32")i=new Int32Array(s);else if(n==="bool")i=new Uint8Array(s);else throw new Error(`Unknown data type ${n}`);for(let o=0;o>>0,b-=h,b*=h,h=b>>>0,b-=h,h+=b*4294967296}return(h>>>0)*23283064365386963e-26};return d}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.alea=a})(Ya,e,!1)}),y$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.x=0,h.y=0,h.z=0,h.w=0,h.next=function(){var f=h.x^h.x<<11;return h.x=h.y,h.y=h.z,h.z=h.w,h.w^=h.w>>>19^f^f>>>8},c===(c|0)?h.x=c:d+=c;for(var m=0;m>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor128=a})(Ya,e,!1)}),b$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var f=h.x^h.x>>>2;return 
h.x=h.y,h.y=h.z,h.z=h.w,h.w=h.v,(h.d=h.d+362437|0)+(h.v=h.v^h.v<<4^(f^f<<1))|0},h.x=0,h.y=0,h.z=0,h.w=0,h.v=0,c===(c|0)?h.x=c:d+=c;for(var m=0;m>>4),h.next()}function o(c,h){return h.x=c.x,h.y=c.y,h.z=c.z,h.w=c.w,h.v=c.v,h.d=c.d,h}function a(c,h){var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorwow=a})(Ya,e,!1)}),w$=Eo(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.x,f=h.i,b,w,L;return b=m[f],b^=b>>>7,w=b^b<<24,b=m[f+1&7],w^=b^b>>>10,b=m[f+3&7],w^=b^b>>>3,b=m[f+4&7],w^=b^b<<7,b=m[f+7&7],b=b^b<<13,w^=b^b<<9,m[f]=w,h.i=f+1&7,w};function d(m,f){var b,w,L=[];if(f===(f|0))w=L[0]=f;else for(f=""+f,b=0;b0;--b)m.next()}d(h,c)}function o(c,h){return h.x=c.x.slice(),h.i=c.i,h}function a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(m.x&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorshift7=a})(Ya,e,!1)}),L$=Eo(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.w,f=h.X,b=h.i,w,L;return h.w=m=m+1640531527|0,L=f[b+34&127],w=f[b=b+1&127],L^=L<<13,w^=w<<17,L^=L>>>15,w^=w>>>12,L=f[b]=L^w,h.i=b,L+(m^m>>>16)|0};function d(m,f){var b,w,L,x,v,N=[],O=128;for(f===(f|0)?(w=f,f=null):(f=f+"\0",w=0,O=Math.max(O,f.length)),L=0,x=-32;x>>15,w^=w<<4,w^=w>>>13,x>=0&&(v=v+1640531527|0,b=N[x&127]^=w+v,L=b==0?L+1:0);for(L>=128&&(N[(f&&f.length||0)&127]=-1),L=127,x=4*128;x>0;--x)w=N[L+34&127],b=N[L=L+1&127],w^=w<<13,b^=b<<17,w^=w>>>15,b^=b>>>12,N[L]=w^b;m.w=v,m.X=N,m.i=L}d(h,c)}function o(c,h){return h.i=c.i,h.w=c.w,h.X=c.X.slice(),h}function a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(m.X&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor4096=a})(Ya,e,!1)}),S$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var f=h.b,b=h.c,w=h.d,L=h.a;return f=f<<25^f>>>7^b,b=b-w|0,w=w<<24^w>>>8^L,L=L-f|0,h.b=f=f<<20^f>>>12^b,h.c=b=b-w|0,h.d=w<<16^b>>>16^L,h.a=L-f|0},h.a=0,h.b=0,h.c=2654435769|0,h.d=1367130551,c===Math.floor(c)?(h.a=c/4294967296|0,h.b=c|0):d+=c;for(var m=0;m>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.tychei=a})(Ya,e,!1)}),Do=Eo(function(e){(function(t,n){var s=this,i=256,o=6,a=52,c="random",h=n.pow(i,o),d=n.pow(2,a),m=d*2,f=i-1,b;function w(k,F,U){var $=[];F=F==!0?{entropy:!0}:F||{};var Y=N(v(F.entropy?[k,E(t)]:k==null?O():k,3),$),j=new L($),Z=function(){for(var ie=j.g(o),de=h,he=0;ie=m;)ie/=2,de/=2,he>>>=1;return(ie+he)/de};return Z.int32=function(){return j.g(4)|0},Z.quick=function(){return 
j.g(4)/4294967296},Z.double=Z,N(E(j.S),t),(F.pass||U||function(ie,de,he,ue){return ue&&(ue.S&&x(ue,j),ie.state=function(){return x(j,{})}),he?(n[c]=ie,de):ie})(Z,Y,"global"in F?F.global:this==n,F.state)}n["seed"+c]=w;function L(k){var F,U=k.length,$=this,Y=0,j=$.i=$.j=0,Z=$.S=[];for(U||(k=[U++]);Y=1||o===0);const a=Math.sqrt(-2*Math.log(o)/o);e=this.mean+this.stdDev*s*a,t=this.mean+this.stdDev*i*a,(!this.truncated||this.isValidTruncated(e))&&(n=!0)}return(!this.truncated||this.isValidTruncated(t))&&(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return this.dtype==null||this.dtype==="float32"?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class x${constructor(e,t,n,s){this.alpha=e,this.beta=1/t,this.dtype=n;const i=s||Math.random();this.randu=Ha(i.toString()),this.randn=new Db(0,1,n,!1,this.randu()),e<1?this.d=e+2/3:this.d=e-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let e,t,n,s,i,o;for(;;){do s=this.randn.nextValue(),o=1+this.c*s;while(o<=0);if(o*=o*o,e=s*s,t=1-.331*e*e,n=.5*e+this.d*(1-o+Math.log(o)),i=this.randu(),ithis.dtype==null||this.dtype==="float32",this.min=e,this.range=t-e,this.dtype=n,s==null&&(s=Math.random()),typeof s=="number"&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=Ha(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function vee(e){const t=e.length,n=N$(e),s=v$(e),i=t/6*(Math.pow(n,2)+.25*Math.pow(s-3,2)),o=5.991;if(i>o)throw new Error(`Invalid p-value for JB: ${i}`)}function Nee(e,t,n,s){s==null&&(s=Xd());const i=kb(e);sb(i,t,s),sb(A$(e,i),n,s)}function kb(e){let t=0;for(let n=0;n{const a=e===t,c=e1;if(a||c||h)return dt([0],s);const d=Math.abs(Math.ceil((t-e)/n)),m=La(d,s);t{const o=s.reciprocal(t);return i([t]),o},n,null,kl)}const _b=z({reciprocal_:D$});function k$(e){const t=W(e,"x","relu"),n=(i,o)=>(o([t]),t.dtype==="bool"?Ae(t,"int32"):i.relu(t)),s={x:t};return G.runKernelFunc(n,s,null,Fl)}const Ni=z({relu_:k$});function F$(e){const t=W(e,"x","relu6"),n=(i,o)=>(o([t]),t.dtype==="bool"?Ae(t,"int32"):i.relu6(t)),s={x:t};return G.runKernelFunc(n,s,null,Wl)}const Wb=z({relu6_:F$});function _$(e,t){const n=W(e,"x","reverse"),s=a=>{const c=qe(t,n.shape);if(n.rank===0)return kr(n);const h=a.reverse(n,c);return K(h,n.shape)},i={x:n},o={dims:t};return G.runKernelFunc(s,i,null,Sy,o)}const Ts=z({reverse_:_$});function W$(e){const t=W(e,"x","reverse");return A(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),Ts(t,0)}const $$=z({reverse1d_:W$});function U$(e,t){const n=W(e,"x","reverse");return A(n.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${n.rank}.`),Ts(n,t)}const B$=z({reverse2d_:U$});function M$(e,t){const n=W(e,"x","reverse");return A(n.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${n.rank}.`),Ts(n,t)}const P$=z({reverse3d_:M$});function z$(e,t){const n=W(e,"x","reverse");return A(n.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${n.rank}.`),Ts(n,t)}const V$=z({reverse4d_:z$});function G$(e){const t=W(e,"x","round"),n={x:t};return G.runKernelFunc(s=>s.round(t),n,null,$l)}const $b=z({round_:G$});function Y$(e){const t=W(e,"x","rsqrt"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.rsqrt(t);return i([t]),o},n,null,Ul)}const yp=z({rsqrt_:Y$});function H$(e){const t=W(e,"x","selu"),n=(i,o)=>{const a=i.selu(t);return o([t]),a},s={x:t};return 
G.runKernelFunc(n,s,null,Bl)}const bp=z({selu_:H$});function q$(e,t,n,s,i,o=[1,1],a="NHWC"){const c=W(e,"x","separableConv2d"),h=W(t,"depthwiseFilter","separableConv2d"),d=W(n,"pointwiseFilter","separableConv2d");let m=c,f=!1;if(c.rank===3&&(f=!0,m=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),a==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");A(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),A(h.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${h.rank}.`),A(d.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${h.rank}.`),A(d.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${d.shape[0]}.`),A(d.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${d.shape[1]}.`);const b=h.shape[2],w=h.shape[3];A(d.shape[2]===b*w,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*w}, but got ${d.shape[2]}.`);const L=Co(m,h,s,i,a,o),x=1,v=Ji(L,d,x,"valid",a);return f?K(v,[v.shape[1],v.shape[2],v.shape[3]]):v}const Ub=z({separableConv2d_:q$});async function j$(e,t){const n=W(e,"x","setdiff1d"),s=W(t,"y","setdiff1d");A(n.dtype===s.dtype,()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`),A(n.rank===1,()=>`x should be 1D tensor, but got x (${n.shape}).`),A(s.rank===1,()=>`y should be 1D tensor, but got y (${s.shape}).`);const i=await n.data(),o=await s.data(),a=new Set(o);let c=0;for(let m=0;ms.sign(t),n,null,Pl)}const Bb=z({sign_:K$});function X$(e){const t=W(e,"x","sin"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sin(t);return i([t]),o},n,null,Aa)}const wp=z({sin_:X$});function J$(e){const t=W(e,"x","sinh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sinh(t);return i([t]),o},n,null,Ml)}const Lp=z({sinh_:J$});function Z$(e,t,n){const s=W(e,"x","slice1d");return A(s.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`),tt(s,[t],[n])}const Sp=z({slice1d_:Z$});function Q$(e,t,n){const s=W(e,"x","slice2d");return A(s.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const Mb=z({slice2d_:Q$});function eU(e,t,n){const s=W(e,"x","slice3d");return A(s.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const Ip=z({slice3d_:eU});function tU(e,t,n){const s=W(e,"x","slice4d");return A(s.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const wh=z({slice4d_:tU});function nU(e,t=-1){const n=W(e,"logits","softmax","float32");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},i={dim:t};return G.runKernelFunc((o,a)=>{const c=o.softmax(n,t);return a([c]),c},s,null,Ay,i)}const Fo=z({softmax_:nU});function sU(e){A(e.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`);const t={input:e};return G.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=e.as2D(i,s),a=n.fft(o);return a.reshape(e.shape)},t,null,pd)}const Lh=z({fft_:sU});function iU(e){A(e.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`);const t={input:e};return G.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=K(e,[i,s]),a=n.ifft(o);return K(a,e.shape)},t,null,fd)}const qa=z({ifft_:iU});function rU(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const i=K(e,[n,t]);s=qa(i)}else{const i=[n,2*(t-1)],o=K(Ga(e),[n,t]),a=K(dh(e),[n,t]),c=Ts(tt(o,[0,1],[n,t-2]),1),h=X(Ts(tt(a,[0,1],[n,t-2]),1),Ce(-1)),d=Yt([o,c],1),m=Yt([a,h],1),f=K(ji(d,m),[i[0],i[1]]);s=qa(f)}if(s=Ga(s),e.rank===3&&e.shape[0]!==0){const i=s,o=e.shape[0];s=K(s,[o,s.shape[0]/o,s.shape[1]]),i.dispose()}return s}const xp=z({irfft_:rU});function pA(e,t,n=0){let s=[];if(typeof t=="number")A(e.shape[n]%t===0,()=>"Number of splits must evenly divide the axis."),s=new Array(t).fill(e.shape[n]/t);else{const i=t.reduce((a,c)=>(c===-1&&(a+=1),a),0);A(i<=1,()=>"There should be only one negative value in split array.");const o=t.indexOf(-1);if(o!==-1){const a=t.reduce((c,h)=>h>0?c+h:c);t[o]=e.shape[n]-a}A(e.shape[n]===t.reduce((a,c)=>a+c),()=>"The sum of sizes must match the size of the axis dimension."),s=t}return s}function oU(e,t,n=0){const s=W(e,"x","split"),i=(c,h)=>{const d=qe(n,s.shape)[0],m=pA(s,t,d);return c.split(s,m,d)},o={x:s},a={numOrSizeSplits:t,axis:n};return G.runKernelFunc(i,o,null,Ty,a)}const hs=z({split_:oU});function aU(e,t){A(e.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${e.dtype}`);let n=e.shape[e.shape.length-1];const s=e.size/n;let i;if(t!=null&&t0),x=e.shape.map(v=>v);x[e.shape.length-1]=t,i=tt(e,L,x),n=t}else if(t!=null&&t>n){const L=e.shape.map(x=>x);L[e.shape.length-1]=t-n,i=Yt([e,dt(L)],e.shape.length-1),n=t}else i=e;const o=et(i),a=K(ji(i,o),[s,n]),c=Lh(a),h=Math.floor(n/2)+1,d=Ga(c),m=dh(c),f=hs(d,[h,n-h],d.shape.length-1),b=hs(m,[h,n-h],m.shape.length-1),w=i.shape.slice();return w[i.shape.length-1]=h,K(ji(f[0],b[0]),w)}const Sh=z({rfft_:aU});function cU(e){const t=W(e,"x","sqrt"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sqrt(t);return i([t]),o},n,null,Gl)}const Nn=z({sqrt_:cU});function lU(e,t){let n=W(e,"a","squaredDifference"),s=W(t,"b","squaredDifference");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(c,h)=>{const d=c.squaredDifference(n,s);return h([n,s]),d},o={a:n,b:s},a={};return G.runKernelFunc(i,o,null,va,a)}const Ih=z({squaredDifference_:lU});function hU(e,t){const n=W(e,"x","squeeze");return K(n,ln(n.shape,t).newShape)}const Mr=z({squeeze_:hU});function uU(e,t=0){const n=th(e,"tensors","stack");if(A(n.length>=1,()=>"Pass at least one tensor to tf.stack"),n.length===1)return Zn(n[0],t);const s=n[0].rank,i=n[0].shape,o=n[0].dtype;A(t<=s,()=>"Axis must be <= rank of the tensor"),n.forEach(c=>{B(i,c.shape,"All tensors passed to stack must have matching shapes"),A(o===c.dtype,()=>"All tensors passed to stack must have matching dtypes")});const a=n.map(c=>Zn(c,t));return Yt(a,t)}const es=z({stack_:uU});function dU(e,t=0){const n=W(e,"x","step"),s={x:n},i={alpha:t};return 
G.runKernelFunc(o=>o.step(n,t),s,null,ql,i)}const ja=z({step_:dU});function pU(e,t,n,s,i=0,o=0,a=0,c=0,h=0){let d=W(e,"x","stridedSlice");const m=w=>{s==null&&(s=new Array(t.length));const L=qd(a);if(L.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(a!==0&&c!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(a!==0&&h!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const x=d.rank-t.length,v=qd(c),N=d.shape.slice();v.forEach(Z=>{t[Z]=0,n[Z]=1,N.splice(Z,0,1)}),d=K(d,N);const{begin:O,end:E,strides:k}=FT(d.shape,L,x,t,n,s,i,o,a);t=O,n=E,s=k;const F=qd(h);F.forEach(Z=>{n[Z]=t[Z]+1,s[Z]=1});const U=jd(t,n,s),$=U.filter((Z,ie)=>F.indexOf(ie)===-1),Y=s.every(Z=>Z===1);if(Y)return K(tt(d,t,U),$);const j=w.stridedSlice(d,t,n,s);return K(j,$)},f={x:d},b={begin:t,end:n,strides:s,beginMask:i,endMask:o,ellipsisMask:a,newAxisMask:c,shrinkAxisMask:h};return G.runKernelFunc(m,f,null,Zx,b)}const Pb=z({stridedSlice_:pU});function mU(e){const t=W(e,"x","tan"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.tan(t);return i([t]),o},n,null,Ca)}const zb=z({tan_:mU});function Pr(e,t,n){if(ne(e),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const s=Ii(e,n);if(s.length!==2&&s.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Er(e,t,s,n)}function Ka(e,t,n){if(ne(e),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");const s=Ii(e,n);if(s.length!==4&&s.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}function fU(e,t,n){if(ne(e),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");const s=Ii(e,n);if(s.length!==5&&s.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}function gU(e,t,n){if(ne(e),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");const s=Ii(e,n);if(s.length!==6&&s.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||s,Er(e,t,s,n)}function yU(e,t=1,n=!0){const s=W(e,"x","topk");if(s.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");const i=s.shape[s.shape.length-1];if(t>i)throw new Error(`'k' passed to topk() must be <= the last dimension (${i}) but got ${t}`);const o={x:s},a={k:t,sorted:n},[c,h]=G.runKernelFunc(d=>d.topk(s,t,n),o,null,Qx,a);return{values:c,indices:h}}const Vb=z({topk_:yU});function bU(e,t=0,n=1,s,i){if(s!=null&&s==="bool")throw new Error("Unsupported data type $ { dtype }");const o=new Db(t,n,s,!0,i),a=wt(e,s);for(let c=0;c0,()=>"The input tensor must be at least 1D");const s={x:n},i={axis:t},[o,a]=G.runKernel(Cd,s,i);return{values:o,indices:a}}const Tp=z({unique_:wU});function LU(e,t,n){const s=W(e,"x","unsortedSegmentSum"),i=W(t,"segmentIds","unsortedSegmentSum","int32");A(Le(n),()=>"numSegments must be of dtype 
int");const o={x:s,segmentIds:i},a={numSegments:n},c=(h,d)=>{const m=h.unsortedSegmentSum(s,i,n);return d([i]),m};return G.runKernelFunc(c,o,null,Cy,a)}const Gb=z({unsortedSegmentSum_:LU});function SU(e,t=0){const n=W(e,"x","unstack");A(t>=-n.shape.length&&t`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`),t<0&&(t+=n.shape.length);const s={value:n},i={axis:t},o=a=>a.unstack(n,t);return G.runKernelFunc(o,s,null,Ny,i)}const Qs=z({unstack_:SU});function mA(e,t=!0,n,s){return G.makeVariable(e,t,n,s)}function Ap(e,t){const n=[];for(let o=0;o0,()=>"mask cannot be scalar"),B(c.slice(o,o+a),i.shape,"mask's shape must match the first K dimensions of tensor's shape,");let h=1;for(let x=o;x"Shape mismatch in v and x");const h=Ce(1),d=Re(h,c);let m=X(Re(a,o),d);if(i){A(s!=null,()=>"When using zeroDebias: true, step is required.");const f=W(s,"step","movingAverage");m=We(m,Re(h,Zs(c,f)))}return be(o,m)}const zU=z({movingAverage_:PU});function VU(e,t,n){const s=W(e,"indices","scatterND","int32"),i=W(t,"updates","scatterND");Zy(i,s,n);const o=h=>h.scatterND(s,i,n),a={indices:s,updates:i},c={shape:n};return G.runKernelFunc(o,a,null,Xx,c)}const EA=z({scatterND_:VU});function GU(e,t,n,s){if(e.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const i=e.rank>0?e.shape[0]:1,o=e.rank>1?e.shape[1]:1;if(n.length!==o)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${o}.`);const a=t.size;if(!(t.rank===0||t.rank===1&&a===i))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${i}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function YU(e,t,n,s=0){const i=W(e,"sparseIndices","sparseToDense","int32"),o=W(t,"sparseValues","sparseToDense"),a=W(s,"defaultValue","sparseToDense",o.dtype);GU(i,o,n,a);const c={sparseIndices:i,sparseValues:o,defaultValue:a},h={outputShape:n};return G.runKernelFunc(d=>d.sparseToDense(i,o,n,a),c,null,Jx,h)}const Hb=z({sparseToDense_:YU});function HU(e,t){const n=W(t,"indices","gatherND","int32"),s=W(e,"x","gatherND"),i=a=>a.gatherND(s,n),o={params:s,indices:n};return G.runKernelFunc(i,o,null,$x)}const DA=z({gatherND_:HU});function qU(e,t){if(t==null)return e.shape.slice();if(ae(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s`x has to be a floating point tensor since it's going to be scaled, but got a ${i.dtype} tensor instead.`),A(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return e instanceof ee?i.clone():i;const o=qU(i,n),a=1-t,c=We(Ma(be(ko(o,0,1,"float32",s),a)),a);return X(i,c)}const kA=z({dropout_:jU});function FA(e){return Math.floor(Math.pow(2,Math.ceil(Math.log(e)/Math.log(2))))}function qb(e,t,n){const s=1-e%2,i=new Float32Array(e);for(let o=0;o1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${s.rank}`),A(s.rank-1===i.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${s.rank} and targets rank ${i.rank}`),B(s.shape.slice(0,s.shape.length-1),i.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");const o=s.shape[s.shape.length-1];A(n>0&&n<=o,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${o}), but got ${n}`);const a=await s.data(),c=await 
i.data(),[h,d]=[a.length/o,o],m=bt("bool",h);for(let f=0;fv.value-x.value),m[f]=0;for(let x=0;x`Error in conv2dDerFilter: input must be rank 4, but got shape ${c.shape}.`),A(h.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${h.shape}.`),A(n.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`);const d=o==="NHWC"?c.shape[3]:c.shape[1],m=o==="NHWC"?h.shape[3]:h.shape[1];A(d===n[2],()=>`Error in conv2dDerFilter: depth of input ${d}) must match input depth in filter (${n[2]}.`),A(m===n[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${n[3]}).`),a!=null&&A(Le(i),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const f=L=>{const x=1,v=Wr(o),N=kn(c.shape,n,s,x,i,a,!1,v);return L.conv2dDerFilter(c,h,N)},b={x:c,dy:h},w={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,filterShape:n};return G.runKernelFunc(f,b,null,Xg,w)}const jb=z({conv2DBackpropFilter_:JU});function Np(e,t,n){if(n==null||n==="linear")return e;if(n==="relu")return X(e,ja(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function Cp(e,t){let n=t;const s=pn(e.shape,t.shape);return s.length>0&&(n=$e(n,s)),K(n,e.shape)}function Rp(e,t,n){if(t==="linear")return e;if(t==="relu")return Ni(e);if(t==="elu")return Ua(e);if(t==="relu6")return Wb(e);if(t==="prelu")return yh(e,n);throw new Error(`Unknown fused activation ${t}.`)}const Op=(e,t)=>{const n=e>0;return!n||t==="linear"};function ZU({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(h=h||"linear",Op(G.state.gradientDepth,h)===!1){let F=Ji(e,t,n,s,i,o,a);return c!=null&&(F=be(F,c)),Rp(F,h,d)}const m=W(e,"x","conv2d"),f=W(t,"filter","conv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),A(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),A(f.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${f.rank}.`),a!=null&&A(Le(s),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),A(b.shape[3]===f.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${f.shape[2]}.`),A(cn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),A(i==="NHWC",()=>`Error in conv2d: got dataFormat of ${i} but only NHWC is currently supported.`);const L=kn(b.shape,f.shape,n,o,s,a);let x;c!=null&&(x=W(c,"bias","fused conv2d"),[x]=Gt(x,m),nt(L.outShape,x.shape));let v;d!=null&&(v=W(d,"prelu weights","fused conv2d"));const N=(F,U)=>{const[$,Y,j,Z]=U,ie=Np(F,j,h);A(_r(o),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${o}'`);const de=wb(Y.shape,ie,$,n,s),he=jb(Y,ie,$.shape,n,s),ue=[de,he];if(Z!=null){const me=Cp(Z,ie);ue.push(me)}return ue},O=F=>{const U=F.fusedConv2d({input:b,filter:f,convInfo:L,bias:x,activation:h,preluActivationWeights:v});return U},E={x:b,filter:f,bias:x,preluActivationWeights:v},k={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const F=Ai((U,$,Y)=>{let j=G.runKernelFunc(O,E,null,Dd,k);return Y([$,U,j]),w&&(j=K(j,[j.shape[1],j.shape[2],j.shape[3]])),{value:j,gradFunc:N}});return F(b,f)}else{const F=Ai((U,$,Y,j)=>{let Z=G.runKernelFunc(O,E,null,Dd,k);return j([$,U,Z,Y]),w&&(Z=K(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:N}});return F(b,f,x)}}const Kb=z({fusedConv2d_:ZU});function QU(e,t,n,s,i,o=[1,1],a){let c=e;e.rank===3&&(c=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let h=t;h.rank===3&&(h=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const d=b=>{const w=kn(e.shape,n,s,o,i,a,!0);return b.depthwiseConv2DDerFilter(c,h,w)},m={x:c,dy:h},f={strides:s,pad:i,dimRoundingMode:a,dilations:o,filterShape:n};return G.runKernelFunc(d,m,null,ey,f)}const _A=z({depthwiseConv2dNativeBackpropFilter_:QU});function eB(e,t,n,s,i,o=[1,1],a){let c=t,h=!1;t.rank===3&&(h=!0,c=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const d=w=>{const L=kn(e,n.shape,s,o,i,a,!0);return w.depthwiseConv2DDerInput(c,n,L)},m={dy:c,filter:n},f={strides:s,pad:i,dimRoundingMode:a,dilations:o,inputShape:e},b=G.runKernelFunc(d,m,null,ty,f);return h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const WA=z({depthwiseConv2dNativeBackpropInput_:eB});function tB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(Op(G.state.gradientDepth,h)===!1){let F=Co(e,t,n,s,i,o,a);return c!=null&&(F=be(F,c)),Rp(F,h,d)}const m=W(e,"x","depthwiseConv2d"),f=W(t,"filter","depthwiseConv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),A(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),A(f.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${f.rank}.`),A(b.shape[3]===f.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${f.shape[2]}.`),o==null&&(o=[1,1]),A(cn(n,o),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),a!=null&&A(Le(s),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${a} but got pad ${s}.`);const L=kn(b.shape,f.shape,n,o,s,a,!0);let x;c!=null&&(x=W(c,"bias","fused conv2d"),[x]=Gt(x,m),nt(L.outShape,x.shape));let v;d!=null&&(v=W(d,"prelu weights","fused depthwiseConv2d"));const N=(F,U)=>{A(_r(o),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`);const[$,Y,j,Z]=U,ie=Np(F,j,h),de=WA(Y.shape,ie,$,n,s,o,a),he=_A(Y,ie,$.shape,n,s,o,a);if(Z!=null){const ue=Cp(x,ie);return[de,he,ue]}return[de,he]},O=F=>{const U=F.fusedDepthwiseConv2D({input:b,filter:f,convInfo:L,bias:x,activation:h,preluActivationWeights:v});return U},E={x:b,filter:f,bias:x,preluActivationWeights:v},k={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const F=Ai((U,$,Y)=>{let j=G.runKernelFunc(O,E,null,kd,k);return Y([$,U,j]),w&&(j=K(j,[j.shape[1],j.shape[2],j.shape[3]])),{value:j,gradFunc:N}});return F(b,f)}else{const F=Ai((U,$,Y,j)=>{let Z=G.runKernelFunc(O,E,null,kd,k);return j([$,U,Z,Y]),w&&(Z=K(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:N}});return F(b,f,x)}}const $A=z({fusedDepthwiseConv2d_:tB});function nB({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:i,activation:o="linear",preluActivationWeights:a}){if(Op(G.state.gradientDepth,o)===!1){let Z=ct(e,t,n,s);return i!=null&&(Z=be(Z,i)),Rp(Z,o,a)}let c=W(e,"a","fused matMul"),h=W(t,"b","fused matMul");[c,h]=Gt(c,h);const d=n?c.shape[c.rank-2]:c.shape[c.rank-1],m=s?h.shape[h.rank-1]:h.shape[h.rank-2],f=n?c.shape[c.rank-1]:c.shape[c.rank-2],b=s?h.shape[h.rank-2]:h.shape[h.rank-1],w=c.shape.slice(0,-2),L=h.shape.slice(0,-2),x=P(w),v=P(L);A(c.rank>=2&&h.rank>=2&&c.rank===h.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${c.rank} and ${h.rank}.`),A(ae(w,L),()=>`Error in fused matMul: outer dimensions (${w}) and (${L}) of Tensors with shapes ${c.shape} and ${h.shape} must match.`),A(d===m,()=>`Error in fused matMul: inner shapes (${d}) and (${m}) of Tensors with shapes ${c.shape} and ${h.shape} and transposeA=${n} and transposeB=${s} must match.`);const N=c.shape.slice(0,-2).concat([f,b]),O=n?K(c,[x,d,f]):K(c,[x,f,d]),E=s?K(h,[v,b,m]):K(h,[v,m,b]);let k;i!=null&&(k=W(i,"bias","fused matMul"),[k]=Gt(k,c),nt(N,k.shape));let F;a!=null&&(F=W(a,"prelu weights","fused matMul"));const U=(Z,ie)=>{const[de,he,ue,me]=ie,ce=Np(K(Z,ue.shape),ue,o);let ye,pe;if(!n&&!s?(ye=ct(ce,he,!1,!0),pe=ct(de,ce,!0,!1)):!n&&s?(ye=ct(ce,he,!1,!1),pe=ct(ce,de,!0,!1)):n&&!s?(ye=ct(he,ce,!1,!0),pe=ct(de,ce,!1,!1)):(ye=ct(he,ce,!0,!0),pe=ct(ce,de,!0,!0)),i!=null){const we=Cp(me,ce);return[ye,pe,we]}else return[ye,pe]},$=Z=>{const ie=Z.fusedBatchMatMul({a:O,b:E,transposeA:n,transposeB:s,bias:k,activation:o,preluActivationWeights:F});return ie},Y={a:O,b:E,bias:k,preluActivationWeights:F},j={transposeA:n,transposeB:s,activation:o};if(i==null){const Z=Ai((ie,de,he)=>{const ue=G.runKernelFunc($,Y,null,Ed,j);return he([ie,de,ue]),{value:K(ue,N),gradFunc:U}});return Z(O,E)}else{const Z=Ai((ie,de,he,ue)=>{const me=G.runKernelFunc($,Y,null,Ed,j);return ue([ie,de,me,he]),{value:K(me,N),gradFunc:U}});return Z(O,E,k)}}const Ep=z({fusedMatMul_:nB});var sB=Object.freeze({__proto__:null,conv2d:Kb,depthwiseConv2d:$A,matMul:Ep});function iB(e){return qb(e,.54,.46)}const rB=z({hammingWindow_:iB});function oB(e){return qb(e,.5,.5)}const UA=z({hannWindow_:oB});function aB(e,t,n,s=!1,i=0){let o=0;const a=[];for(;o+t<=e.size;)a.push(tt(e,o,t)),o+=n;if(s)for(;o`Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`),A(c.rank===2&&c.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${d},4] but had shape ${c.shape}.`),A(h.rank===1&&h.shape[0]===d,()=>`Error in cropAndResize: boxInd must be have size [${d}] but had shape ${c.shape}.`),A(s.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length 
${s.length}.`),A(s[0]>=1&&s[1]>=1,()=>`cropSize must be atleast [1,1], but was ${s}`),A(i==="bilinear"||i==="nearest",()=>`method must be bilinear or nearest, but was ${i}`);const m=L=>L.cropAndResize(a,c,h,s,i,o),f={image:a,boxes:c,boxInd:h},b={method:i,extrapolationValue:o,cropSize:s},w=G.runKernelFunc(m,f,null,Dx,b);return w}const uB=z({cropAndResize_:hB});function dB(e){const t=W(e,"image","flipLeftRight","float32");A(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);const n={image:t},s=G.runKernel(md,n,{});return s}const pB=z({flipLeftRight_:dB});function mB(e,t,n=0,s=.5){const i=W(e,"image","rotateWithOffset","float32");A(i.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${i.rank}.`);const o={image:i},a={radians:t,fillValue:n,center:s},c=G.runKernel(Od,o,a);return c}const fB=z({rotateWithOffset_:mB});function Xa(e,t,n,s,i,o){s==null&&(s=.5),i==null&&(i=Number.NEGATIVE_INFINITY),o==null&&(o=0);const a=e.shape[0];return n=Math.min(n,a),A(0<=s&&s<=1,()=>`iouThreshold must be in [0, 1], but was '${s}'`),A(e.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`),A(e.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`),A(t.rank===1,()=>"scores must be a 1D tensor"),A(t.shape[0]===a,()=>`scores has incompatible shape with boxes. Expected ${a}, but was ${t.shape[0]}`),A(0<=o&&o<=1,()=>`softNmsSigma must be in [0, 1], but was '${o}'`),{maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o}}function gB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const o=W(e,"boxes","nonMaxSuppression"),a=W(t,"scores","nonMaxSuppression"),c=Xa(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h={maxOutputSize:n,iouThreshold:s,scoreThreshold:i};return G.runKernelFunc(d=>d.nonMaxSuppression(o,a,n,s,i),{boxes:o,scores:a},null,fy,h)}const yB=z({nonMaxSuppression_:gB});function bB(e,t,n){const s=wB(e,t,n),i=s<0?-(s+1):s;e.splice(i,0,t)}function wB(e,t,n){return SB(e,t,n||LB)}function LB(e,t){return e>t?1:e>>1);const c=n(t,e[o]);c>0?s=o+1:(i=o,a=!c)}return a?s:-s-1}function Dp(e,t,n,s,i){return Xb(e,t,n,s,i,0).selectedIndices}function kp(e,t,n,s,i,o){return Xb(e,t,n,s,i,0,!1,o,!0)}function Fp(e,t,n,s,i,o){return Xb(e,t,n,s,i,o,!0)}function Xb(e,t,n,s,i,o,a=!1,c=!1,h=!1){const d=[];for(let v=0;vi&&d.push({score:t[v],boxIndex:v,suppressBeginIndex:0});d.sort(MA);const m=o>0?-.5/o:0,f=[],b=[];for(;f.length0;){const v=d.pop(),{score:N,boxIndex:O,suppressBeginIndex:E}=v;if(N=E;--F){const U=IB(e,O,f[F]);if(U>=s){k=!0;break}if(v.score=v.score*xB(s,m,U),v.score<=i)break}v.suppressBeginIndex=f.length,k||(v.score===N?(f.push(O),b.push(v.score)):v.score>i&&bB(d,v,MA))}const w=f.length,L=n-w;c&&L>0&&(f.push(...new Array(L).fill(0)),b.push(...new Array(L).fill(0)));const x={selectedIndices:ls(f,"int32")};return a&&(x.selectedScores=ls(b,"float32")),h&&(x.validOutputs=Ce(w,"int32")),x}function IB(e,t,n){const s=e.subarray(t*4,t*4+4),i=e.subarray(n*4,n*4+4),o=Math.min(s[0],s[2]),a=Math.min(s[1],s[3]),c=Math.max(s[0],s[2]),h=Math.max(s[1],s[3]),d=Math.min(i[0],i[2]),m=Math.min(i[1],i[3]),f=Math.max(i[0],i[2]),b=Math.max(i[1],i[3]),w=(c-o)*(h-a),L=(f-d)*(b-m);if(w<=0||L<=0)return 0;const x=Math.max(o,d),v=Math.max(a,m),N=Math.min(c,f),O=Math.min(h,b),E=Math.max(N-x,0)*Math.max(O-v,0);return E/(w+L-E)}function xB(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function MA(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}async function TB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const 
o=W(e,"boxes","nonMaxSuppressionAsync"),a=W(t,"scores","nonMaxSuppressionAsync"),c=Xa(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h=await Promise.all([o.data(),a.data()]),d=h[0],m=h[1],f=Dp(d,m,n,s,i);return o!==e&&o.dispose(),a!==t&&a.dispose(),f}const AB=TB;function vB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Xa(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d={boxes:a,scores:c},m={maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o},f=G.runKernel(Sd,d,m);return{selectedIndices:f[0],selectedScores:f[1]}}const NB=z({nonMaxSuppressionWithScore_:vB});async function CB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Xa(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d=await Promise.all([a.data(),c.data()]),m=d[0],f=d[1],b=Fp(m,f,n,s,i,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),b}const RB=CB;function OB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Xa(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,f=h.scoreThreshold,b={boxes:a,scores:c},w={maxOutputSize:d,iouThreshold:m,scoreThreshold:f,padToMaxOutputSize:o},L=G.runKernel(Ld,b,w);return{selectedIndices:L[0],validOutputs:L[1]}}const EB=z({nonMaxSuppressionPadded_:OB});async function DB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Xa(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,f=h.scoreThreshold,[b,w]=await Promise.all([a.data(),c.data()]),L=kp(b,w,d,m,f,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),L}const kB=DB;function FB(e,t,n=!1){const s=W(e,"images","resizeBilinear");A(s.rank===3||s.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${s.rank}.`),A(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h=(b,w)=>(w([i]),b.resizeBilinear(i,a,c,n)),d={images:i},m={alignCorners:n,size:t},f=G.runKernelFunc(h,d,null,Ly,m);return o?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const PA=z({resizeBilinear_:FB});function _B(e,t,n=!1){const s=W(e,"images","resizeNearestNeighbor");A(s.rank===3||s.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${s.rank}.`),A(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),A(s.dtype==="float32"||s.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h={images:i},d={alignCorners:n,size:t},m=(b,w)=>(w([i]),b.resizeNearestNeighbor(i,a,c,n)),f=G.runKernelFunc(m,h,null,wy,d);return o?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const zA=z({resizeNearestNeighbor_:_B});function WB(e,t,n){A(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),A(n%1===0,()=>`bandPart(): numUpper must be an integer, got ${n}.`);const s=W(e,"a","bandPart");A(s.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`);const i=s.shape,[o,a]=s.shape.slice(-2);if(!(t<=o))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${o}).`);if(!(n<=a))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the 
number of columns (${a}).`);t<0&&(t=o),n<0&&(n=a);const c=K(bh(0,o,1,"int32"),[-1,1]),h=bh(0,a,1,"int32"),d=Re(c,h),m=Us(Ur(d,Ce(+t,"int32")),Zi(d,Ce(-n,"int32"))),f=dt([o,a],s.dtype);return K(es(Qs(K(s,[-1,o,a])).map(b=>Bn(m,b,f))),i)}const $B=z({bandPart_:WB});function UB(e){let t;if(Array.isArray(e)){t=!1,A(e!=null&&e.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const i=e[0].shape[0];for(let o=1;o`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[o].shape[0]} vs. ${i})`)}else t=!0,e=hs(e,e.shape[0],0).map(i=>Mr(i,[0]));A(e.length<=e[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`);const n=[],s=e;for(let i=0;i{let o=s[i];if(i>0)for(let a=0;a=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`),e.rank===2)return VA(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce((h,d)=>h*d),s=Qs(K(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),i=[],o=[];s.forEach(h=>{const[d,m]=VA(h,t);i.push(d),o.push(m)});const a=K(es(i,0),e.shape),c=K(es(o,0),e.shape);return[a,c]}}function VA(e,t=!1){return G.tidy(()=>{A(e.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`);const n=e.shape[0],s=e.shape[1];let i=cp(n),o=kr(e);const a=Pr([[1]],[1,1]);let c=kr(a);const h=n>=s?s:n;for(let d=0;d{const w=tt(o,[d,d],[n-d,1]),L=vp(w),x=tt(o,[d,d],[1,1]),v=Bn(xs(x,0),Pr([[-1]]),Pr([[1]])),N=Re(x,X(v,L)),O=We(w,N);O.shape[0]===1?c=kr(a):c=Yt([a,tt(O,[1,0],[O.shape[0]-1,O.shape[1]])],0);const E=Ht(We(ct(v,N),L)),k=tt(o,[d,0],[n-d,s]),F=X(E,c),U=Ye(c);if(d===0)o=Re(k,ct(F,ct(U,k)));else{const j=Re(k,ct(F,ct(U,k)));o=Yt([tt(o,[0,0],[d,s]),j],0)}const $=Ye(F),Y=tt(i,[0,d],[n,i.shape[1]-d]);if(d===0)i=Re(Y,ct(ct(Y,c),$));else{const j=Re(Y,ct(ct(Y,c),$));i=Yt([tt(i,[0,0],[n,d]),j],1)}return[c,o,i]}),He([m,f,b])}return!t&&n>s&&(i=tt(i,[0,0],[n,s]),o=tt(o,[0,0],[s,s])),[i,o]})}const PB=z({qr_:MB});(function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(r.Reduction||(r.Reduction={}));function zB(e,t,n=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const s=W(e,"losses","computeWeightedLoss");let i=null;t!=null&&(i=W(t,"weights","computeWeightedLoss"));const o=i==null?s:X(s,i);if(n===r.Reduction.NONE)return o;if(n===r.Reduction.SUM)return $e(o);if(n===r.Reduction.MEAN){if(i==null)return qt(o);{const a=s.size/i.size,c=We($e(o),$e(i));return a>1?We(c,Ce(a)):c}}if(n===r.Reduction.SUM_BY_NONZERO_WEIGHTS){if(i==null)return We($e(o),Ce(s.size));{const a=X(i,Js(s.shape)),c=Ae($e(Br(a,Ce(0))),"float32");return We($e(o),c)}}throw Error(`Unknown reduction: ${n}`)}const Qi=z({computeWeightedLoss_:zB});function VB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","absoluteDifference"),o=W(t,"predictions","absoluteDifference");let a=null;n!=null&&(a=W(n,"weights","absoluteDifference")),B(i.shape,o.shape,"Error in absoluteDifference: ");const c=dn(Re(i,o));return Qi(c,a,s)}const GB=z({absoluteDifference_:VB});function YB(e,t,n,s,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","cosineDistance"),a=W(t,"predictions","cosineDistance");let c=null;s!=null&&(c=W(s,"weights","cosineDistance")),B(o.shape,a.shape,"Error in cosineDistance: ");const h=Ce(1),d=Re(h,$e(X(o,a),n,!0));return Qi(d,c,i)}const HB=z({cosineDistance_:YB});function qB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let i=W(e,"labels","hingeLoss");const o=W(t,"predictions","hingeLoss");let 
a=null;n!=null&&(a=W(n,"weights","hingeLoss")),B(i.shape,o.shape,"Error in hingeLoss: ");const c=Ce(1);i=Re(X(Ce(2),i),c);const h=Ni(Re(c,X(i,o)));return Qi(h,a,s)}const jB=z({hingeLoss_:qB});function KB(e,t,n,s=1,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","huberLoss"),a=W(t,"predictions","huberLoss");let c=null;n!=null&&(c=W(n,"weights","huberLoss")),B(o.shape,a.shape,"Error in huberLoss: ");const h=Ce(s),d=dn(Re(a,o)),m=Oo(d,h),f=Re(d,m),b=be(X(Ce(.5),At(m)),X(h,f));return Qi(b,c,i)}const XB=z({huberLoss_:KB});function JB(e,t,n,s=1e-7,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","logLoss"),a=W(t,"predictions","logLoss");let c=null;n!=null&&(c=W(n,"weights","logLoss")),B(o.shape,a.shape,"Error in logLoss: ");const h=Ce(1),d=Ce(s),m=Ht(X(o,cs(be(a,d)))),f=X(Re(h,o),cs(be(Re(h,a),d))),b=Re(m,f);return Qi(b,c,i)}const ZB=z({logLoss_:JB});function QB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","meanSquaredError"),o=W(t,"predictions","meanSquaredError");let a=null;n!=null&&(a=W(n,"weights","meanSquaredError")),B(i.shape,o.shape,"Error in meanSquaredError: ");const c=Ih(i,o);return Qi(c,a,s)}const eM=z({meanSquaredError_:QB});function tM(e,t){const n=W(e,"labels","sigmoidCrossEntropyWithLogits"),s=W(t,"logits","sigmoidCrossEntropyWithLogits");B(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const i=Ni(s),o=X(s,n),a=hp(Is(Ht(dn(s))));return be(Re(i,o),a)}function nM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"multiClassLabels","sigmoidCrossEntropy");const a=W(t,"logits","sigmoidCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","sigmoidCrossEntropy")),B(o.shape,a.shape,"Error in sigmoidCrossEntropy: "),s>0){const d=Ce(s),m=Ce(1),f=Ce(.5);o=be(X(o,Re(m,d)),X(f,d))}const h=tM(o,a);return Qi(h,c,i)}const sM=z({sigmoidCrossEntropy_:nM});function iM(e,t,n=-1){if(n===-1&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);const s=Ai((i,o,a)=>{const c=!0,h=Rb(o,[n],c),d=Re(Ae(o,"float32"),h);a([i,d]);const m=Ht(X(d,i)),f=$e(m,[n]),b=(w,L)=>{const[x,v]=L,N=vn(w.shape,[n]);return[X(K(w,N),Re(Ae(x,"float32"),Is(v))),X(K(w,N),Re(Is(v),Ae(x,"float32")))]};return{value:f,gradFunc:b}});return s(e,t)}function rM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"onehotLabels","softmaxCrossEntropy");const a=W(t,"logits","softmaxCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","softmaxCrossEntropy")),B(o.shape,a.shape,"Error in softmaxCrossEntropy: "),s>0){const d=Ce(s),m=Ce(1),f=Ce(o.shape[1]);o=be(X(o,Re(m,d)),We(d,f))}const h=iM(o,a);return Qi(h,c,i)}const oM=z({softmaxCrossEntropy_:rM});const aM={fft:Lh,ifft:qa,rfft:Sh,irfft:xp},cM={hammingWindow:rB,hannWindow:UA,frame:BA,stft:lB},zr={flipLeftRight:pB,resizeNearestNeighbor:zA,resizeBilinear:PA,rotateWithOffset:fB,cropAndResize:uB,nonMaxSuppression:yB,nonMaxSuppressionAsync:AB,nonMaxSuppressionWithScore:NB,nonMaxSuppressionWithScoreAsync:RB,nonMaxSuppressionPadded:EB,nonMaxSuppressionPaddedAsync:kB},GA={bandPart:$B,gramSchmidt:BB,qr:PB},lM={absoluteDifference:GB,computeWeightedLoss:Qi,cosineDistance:HB,hingeLoss:jB,huberLoss:XB,logLoss:ZB,meanSquaredError:eM,sigmoidCrossEntropy:sM,softmaxCrossEntropy:oM};class er extends Ao{minimize(e,t=!1,n){const{value:s,grads:i}=this.computeGradients(e,n);if(n!=null){const o=n.map(a=>({name:a.name,tensor:i[a.name]}));this.applyGradients(o)}else this.applyGradients(i);return He(i),t?s:(s.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return Cb(e,t)}dispose(){this.iterations_!=null&&He(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Ce(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(er,Symbol.hasInstance,{value:e=>e.minimize!=null&&e.computeGradients!=null&&e.applyGradients!=null});class Th extends er{constructor(e,t,n=null){super();this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],n==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n],o=!1;this.accumulatedGrads[s]==null&&(this.accumulatedGrads[s]={originalName:`${n}/accum_grad`,variable:Q(()=>et(i).variable(o))}),this.accumulatedUpdates[s]==null&&(this.accumulatedUpdates[s]={originalName:`${n}/accum_var`,variable:Q(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedGrads[s].variable,h=this.accumulatedUpdates[s].variable;Q(()=>{const d=be(X(c,this.rho),X(At(a),1-this.rho)),m=X(We(Nn(be(h,this.epsilon)),Nn(be(c,this.epsilon))),a),f=be(X(h,this.rho),X(At(m),1-this.rho));c.assign(d),h.assign(f);const b=be(X(m,-this.learningRate),i);i.assign(b)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(He(this.accumulatedGrads.map(e=>e.variable)),He(this.accumulatedUpdates.map(e=>e.variable)))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=e.length/2,n=!1;this.accumulatedGrads=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedUpdates=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}Th.className="Adadelta",fe(Th);class Ah extends er{constructor(e,t=.1){super();this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n];if(this.accumulatedGrads[s]==null){const c=!1;this.accumulatedGrads[s]={originalName:`${n}/accumulator`,variable:Q(()=>Ba(i.shape,this.initialAccumulatorValue).variable(c))}}const o=Array.isArray(e)?e[s].tensor:e[n];if(o==null)return;const a=this.accumulatedGrads[s].variable;Q(()=>{const c=be(a,At(o));a.assign(c);const h=be(X(We(o,Nn(be(c,G.backend.epsilon()))),-this.learningRate),i);i.assign(h)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&He(this.accumulatedGrads.map(e=>e.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulatedGrads=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}Ah.className="Adagrad",fe(Ah);class vh extends er{constructor(e,t,n,s=null){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],Q(()=>{this.accBeta1=Ce(t).variable(),this.accBeta2=Ce(n).variable()}),s==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);Q(()=>{const n=Re(1,this.accBeta1),s=Re(1,this.accBeta2);t.forEach((i,o)=>{const a=G.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:Q(()=>et(a).variable(c))}),this.accumulatedSecondMoment[o]==null&&(this.accumulatedSecondMoment[o]={originalName:`${i}/v`,variable:Q(()=>et(a).variable(c))});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedSecondMoment[o].variable,f=be(X(d,this.beta1),X(h,1-this.beta1)),b=be(X(m,this.beta2),X(At(h),1-this.beta2)),w=We(f,n),L=We(b,s);d.assign(f),m.assign(b);const x=be(X(We(w,be(Nn(L),this.epsilon)),-this.learningRate),a);a.assign(x)}),this.accBeta1.assign(X(this.accBeta1,this.beta1)),this.accBeta2.assign(X(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedSecondMoment!=null&&He(this.accumulatedSecondMoment.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await 
this.extractIterations(e),Q(()=>{this.accBeta1.assign(Zs(this.beta1,this.iterations_+1)),this.accBeta2.assign(Zs(this.beta2,this.iterations_+1))});const t=e.length/2,n=!1;this.accumulatedFirstMoment=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedSecondMoment=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}vh.className="Adam",fe(vh);class Nh extends er{constructor(e,t,n,s=null,i=0){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=i,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],Q(()=>{this.iteration=Ce(0).variable(),this.accBeta1=Ce(t).variable()}),s==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);Q(()=>{const n=Re(1,this.accBeta1),s=We(-this.learningRate,be(X(this.iteration,this.decay),1));t.forEach((i,o)=>{const a=G.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:et(a).variable(c)}),this.accumulatedWeightedInfNorm[o]==null&&(this.accumulatedWeightedInfNorm[o]={originalName:`${i}/v`,variable:et(a).variable(c)});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedWeightedInfNorm[o].variable,f=be(X(d,this.beta1),X(h,1-this.beta1)),b=X(m,this.beta2),w=dn(h),L=$s(b,w);d.assign(f),m.assign(L);const x=be(X(We(s,n),We(f,be(L,this.epsilon))),a);a.assign(x)}),this.iteration.assign(be(this.iteration,1)),this.accBeta1.assign(X(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedWeightedInfNorm!=null&&He(this.accumulatedWeightedInfNorm.map(e=>e.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}Nh.className="Adamax",fe(Nh);class Ja extends er{constructor(e){super();this.learningRate=e,this.setLearningRate(e)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=Array.isArray(e)?e[s].tensor:e[n];if(i==null)return;const o=G.registeredVariables[n];Q(()=>{const a=be(X(this.c,i),o);o.assign(a)})}),this.incrementIterations()}setLearningRate(e){this.learningRate=e,this.c!=null&&this.c.dispose(),this.c=bn(Ce(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(e=await this.extractIterations(e),e.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Ja.className="SGD",fe(Ja);class Ch extends Ja{constructor(e,t,n=!1){super(e);this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Ce(this.momentum)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const 
i=G.registeredVariables[n];if(this.accumulations[s]==null){const c=!1;this.accumulations[s]={originalName:`${n}/momentum`,variable:Q(()=>et(i).variable(c))}}const o=this.accumulations[s].variable,a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;Q(()=>{let c;const h=be(X(this.m,o),a);this.useNesterov?c=be(X(this.c,be(a,X(h,this.m))),i):c=be(X(this.c,h),i),o.assign(h),i.assign(c)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&He(this.accumulations.map(e=>e.variable))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulations=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}Ch.className="Momentum",fe(Ch);class Rh extends er{constructor(e,t=.9,n=0,s=null,i=!1){super();if(this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=i,s==null&&(this.epsilon=G.backend.epsilon()),e==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n],o=!1;this.accumulatedMeanSquares[s]==null&&(this.accumulatedMeanSquares[s]={originalName:`${n}/rms`,variable:Q(()=>et(i).variable(o))}),this.accumulatedMoments[s]==null&&(this.accumulatedMoments[s]={originalName:`${n}/momentum`,variable:Q(()=>et(i).variable(o))}),this.accumulatedMeanGrads[s]==null&&this.centered&&(this.accumulatedMeanGrads[s]={originalName:`${n}/mg`,variable:Q(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedMeanSquares[s].variable,h=this.accumulatedMoments[s].variable;Q(()=>{const d=be(X(c,this.decay),X(At(a),1-this.decay));if(this.centered){const m=this.accumulatedMeanGrads[s].variable,f=be(X(m,this.decay),X(a,1-this.decay)),b=We(X(a,this.learningRate),Nn(Re(d,be(At(f),this.epsilon)))),w=be(X(h,this.momentum),b);c.assign(d),m.assign(f),h.assign(w);const L=Re(i,w);i.assign(L)}else{const m=be(X(c,this.decay),X(At(a),1-this.decay)),f=be(X(h,this.momentum),We(X(a,this.learningRate),Nn(be(m,this.epsilon))));c.assign(m),h.assign(f);const b=Re(i,f);i.assign(b)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&He(this.accumulatedMeanSquares.map(e=>e.variable)),this.accumulatedMeanGrads!=null&&this.centered&&He(this.accumulatedMeanGrads.map(e=>e.variable)),this.accumulatedMoments!=null&&He(this.accumulatedMoments.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const 
t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedMoments=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.centered&&(this.accumulatedMeanGrads=e.slice(t*2,t*3).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}Rh.className="RMSProp",fe(Rh);class _o{static sgd(e){return new Ja(e)}static momentum(e,t,n=!1){return new Ch(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,i=!1){return new Rh(e,t,n,s,i)}static adam(e=.001,t=.9,n=.999,s=null){return new vh(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new Th(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,i=0){return new Nh(e,t,n,s,i)}static adagrad(e,t=.1){return new Ah(e,t)}}const Wo={sgd:_o.sgd,momentum:_o.momentum,adadelta:_o.adadelta,adagrad:_o.adagrad,rmsprop:_o.rmsprop,adamax:_o.adamax,adam:_o.adam};const hM=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:e=>e())();function _p(){return new Promise(e=>hM(()=>e()))}function Jb(e,t,n){const s=n*(typeof e=="number"?e:e[0]),i=t*(typeof e=="number"?e:e[1]);return[s,i]}function Oh(e,t,n,s=!0){let i=[];if(s)i=i.concat(t.slice(0)),i.push(e[0]/n),i=i.concat(e.slice(1));else{i=i.concat(e[0]);const o=t.length;for(let a=0;a=t*2+1||a%2===1?o.push(a):i.push(a);s.push(...i),s.push(0),s.push(...o)}return s}function Dh(e,t,n,s=!0){const i=[];s?i.push(e[0]/n):i.push(e[0]*n);for(let o=1;o{const a=[...i];a[n]=o;const c=tt(e,s,a);return s[n]+=o,c})}function hw(e,t){const n=new Array(e.rank);for(let i=0;iE.value-O.value);const x=f*s,v=h.subarray(x,x+s),N=d.subarray(x,x+s);for(let O=0;O{const[n]=t;return{x:()=>X(e,ja(Ae(n,"float32"),-1))}}};const fM={kernelName:ol,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=At(Ae(n,"float32")),i=Nn(Re(Ce(1),s));return Ht(We(e,i))}}}};const gM={kernelName:al,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Nn(Re(At(Ae(n,"float32")),1));return We(e,s)}}}};const yM={kernelName:wo,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const h=pn(n.shape,i);return h.length>0&&(c=$e(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=pn(s.shape,i);return h.length>0&&(c=$e(c,h)),K(c,s.shape)};return{a:o,b:a}}};const bM={kernelName:Gg,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach((s,i)=>{n[i]=()=>e.clone()}),n}};const wM={kernelName:Yg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const LM={kernelName:Hg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const SM={kernelName:cl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Nn(Re(Ce(1),At(Ae(n,"float32")))))}}};const IM={kernelName:ll,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Nn(be(Ce(1),At(Ae(n,"float32"))));return We(e,s)}}}};const xM={kernelName:nd,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=be(At(n),At(s));let h=X(e,We(s,c));const d=pn(n.shape,i);return d.length>0&&(h=$e(h,d)),K(h,n.shape)},a=()=>{const c=be(At(n),At(s));let h=Ht(X(e,We(n,c)));const d=pn(s.shape,i);return d.length>0&&(h=$e(h,d)),K(h,s.shape)};return{a:o,b:a}}};const 
TM={kernelName:hl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,be(At(Ae(n,"float32")),1))}}};const AM={kernelName:ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Re(Ce(1),At(Ae(n,"float32"))))}}};function vM(e,t,n,s,i=[1,1,1],o,a){const c=W(e,"dy","avgPool3dBackprop"),h=W(t,"input","avgPool3dBackprop");let d=c,m=h,f=!1;h.rank===4&&(f=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]]),m=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]])),A(d.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${d.rank}.`),A(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),A(cn(s,i),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${s} and dilations '${i}'`),a!=null&&A(Le(o),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const b=v=>{const N=oh(m.shape,n,s,i,o,a);return v.avgPool3dBackprop(d,m,N)},w={dy:d,input:m},L={filterSize:n,strides:s,dilations:i,pad:o,dimRoundingMode:a},x=G.runKernelFunc(b,w,null,Ex,L);return f?K(x,[x.shape[1],x.shape[2],x.shape[3],x.shape[4]]):x}const NM=z({avgPool3dBackprop_:vM});const CM={kernelName:qg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,dilations:a,pad:c,dimRoundingMode:h}=n,d=a==null?[1,1,1]:a;return{x:()=>NM(e,s,i,o,d,c,h)}}};function RM(e,t,n,s,i){const o=W(e,"dy","avgPoolBackprop"),a=W(t,"input","avgPoolBackprop");A(a.rank===o.rank,()=>`Rank of input (${a.rank}) does not match rank of dy (${o.rank})`);let c=a,h=o,d=!1;a.rank===3&&(d=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),h=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(h.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${h.rank}.`),A(c.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${c.rank}.`);const m=L=>{const x=Un(c.shape,n,s,1,i);return L.avgPoolBackprop(h,c,x)},f={dy:h,input:c},b={filterSize:n,strides:s,pad:i},w=G.runKernelFunc(m,f,null,sd,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3]]):w}const OM=z({avgPoolBackprop_:RM});const EM={kernelName:dl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,pad:a}=n;return{x:()=>OM(e,s,i,o,a)}}};const DM={kernelName:id,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,i]=t,{transposeA:o,transposeB:a}=n;return!o&&!a?{a:()=>ct(e,i,!1,!0),b:()=>ct(s,e,!0,!1)}:!o&&a?{a:()=>ct(e,i,!1,!1),b:()=>ct(e,s,!0,!1)}:o&&!a?{a:()=>ct(i,e,!1,!0),b:()=>ct(s,e,!1,!1)}:{a:()=>ct(i,e,!0,!0),b:()=>ct(e,s,!0,!0)}}};const kM={kernelName:jg,gradFunc:(e,t,n)=>{const{blockShape:s,crops:i}=n;return{x:()=>gh(e,s,i)}}};const FM={kernelName:Kg,gradFunc:(e,t,n)=>{const s=n,i=s.inputShape,o=s.shape,a=Array.from(o);for(let h=i.length-1;h>=0;h--)if(i[h]===o[h])a[h]=1;else if(i[h]!==1)throw new Error(`broadcastTo(): [${i}] cannot be broadcast to [${o}].`);const c=[];for(let h=0;h1&&c.push(h);return{x:()=>$e(e,c,!0)}}};const _M={kernelName:Sa,gradFunc:e=>({x:()=>e.clone()})};const WM={kernelName:pl,gradFunc:e=>({x:()=>et(e)})};const $M={kernelName:ml,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:i,clipValueMax:o}=n;return{x:()=>Bn(Us(Zi(s,i),Ur(s,o)),e,et(e))}}};const UM={kernelName:fl,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map(h=>h.shape),{axis:i}=n,o=qe(i,t[0].shape)[0],a=s.map(h=>h[o]),c=hs(e,a,o);return c.map(h=>()=>h)}};const BM={kernelName:od,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{dilations:o,strides:a,pad:c,dataFormat:h}=n;return A(_r(o),()=>`Error 
in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${o}'`),{x:()=>wb(s.shape,e,i,a,c,h),filter:()=>jb(s,e,i.shape,a,c,h)}}};const MM={kernelName:ad,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{strides:o,pad:a,dataFormat:c,dimRoundingMode:h}=n;return{dy:()=>Ji(e,i,o,a,c,1,h),filter:()=>jb(e,s,i.shape,o,a,c,h)}}};function PM(e,t,n,s,i){let o=e;e.rank===4&&(o=K(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let a=t;a.rank===4&&(a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),A(o.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${o.shape}.`),A(a.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${a.shape}.`),A(n.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`),A(o.shape[4]===n[3],()=>`Error in conv3dDerFilter: depth of input ${o.shape[4]}) must match input depth in filter (${n[3]}.`),A(a.shape[4]===n[4],()=>`Error in conv3dDerFilter: depth of dy (${a.shape[4]}) must match output depth for filter (${n[4]}).`);const c=m=>{const f=1,b=Fr(o.shape,n,s,f,i);return m.conv3dDerFilter(o,a,b)},h={x:o,dy:a},d={strides:s,pad:i,filterShape:n};return G.runKernelFunc(c,h,null,Jg,d)}const zM=z({conv3DBackpropFilter_:PM});const VM={kernelName:cd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o}=n;A(_r(s),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);const[a,c]=t;return{x:()=>eA(a.shape,e,c,i,o),filter:()=>zM(a,e,c.shape,i,o)}}};const GM={kernelName:Ia,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Ht(wp(Ae(n,"float32"))),e)}}};const YM={kernelName:gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Lp(Ae(n,"float32")),e)}}};const HM={kernelName:Qg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:i,exclusive:o,reverse:a}=n;return{x:()=>{const c=Xn([i],s.rank);let h=ap(e,i,o,!a);return c!=null&&(h=Ye(h,c)),h}}}};const qM={kernelName:ld,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o,dimRoundingMode:a}=n,c=s==null?[1,1]:s;A(_r(c),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${c}'`);const[h,d]=t;return A(h.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${h.rank}.`),A(d.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${d.rank}.`),A(h.shape[3]===d.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${h.shape[3]}) must match the inChannels dimension in filter ${d.shape[2]}.`),A(cn(i,c),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'.`),a!=null&&A(Le(o),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`),{x:()=>WA(h.shape,e,d,i,o,s,a),filter:()=>_A(h,e,d.shape,i,o,s,a)}}};const jM={kernelName:hd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,o={x:s,filter:i,dy:e},a={x:s,filter:i,dy:e};return{x:()=>G.runKernel(ud,o,n),filter:()=>G.runKernel(dd,a,n)}}};const KM={kernelName:xa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=We(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{let c=X(e,Ae(n,"float32"));const h=pn(s.shape,i);h.length>0&&(c=K($e(c,h),s.shape));const d=At(s);return Ht(We(c,Ae(d,"float32")))};return{a:o,b:a}}};const XM={kernelName:yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s=o=>o.eluDer(e,n),i={dy:e,y:n};return{x:()=>G.runKernelFunc(s,i,null,_x)}}};const JM={kernelName:bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Is(Ht(At(n))),2/Math.sqrt(Math.PI));return{x:()=>X(e,s)}}};const ZM={kernelName:wl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,n)}}};const QM={kernelName:Ll,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Is(n))}}};const eP={kernelName:Sl,gradFunc:e=>({x:()=>et(e)})};const tP={kernelName:sy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=We(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{let c=X(e,Ae(n,"float32"));const h=pn(s.shape,i);h.length>0&&(c=K($e(c,h),s.shape));const d=At(s);return Ht(We(c,Ae(d,"float32")))};return{a:o,b:a}}};const nP={kernelName:Il,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[i,o,a,c]=t,h=c==null?Ce(1):c,d=pn(o.shape,i.shape),m=[];if(o.rank===1){for(let k=0;ko.rank===1?K(X(X(e,$r(K(w,[1,1,1,o.shape[0]]),m)),h),i.shape):K(X(X(e,w),h),i.shape),v=()=>{let k=X(X(w,Ce(-1)),b);return o.rank===1&&(k=$e(k,d)),K(k,o.shape)},N=()=>{let k=X(X(L,f),b);return o.rank===1&&(k=$e(k,d)),K(k,o.shape)},O=()=>{const k=X(f,w);let F=X(e,k);return o.rank===1&&(F=$e(F,d)),K(F,o.shape)},E=()=>{let k=e;return o.rank===1&&(k=$e(k,d)),K(k,o.shape)};return{x,mean:v,variance:N,scale:O,offset:E}}};const sP={kernelName:iy,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,i]=t,{axis:o}=n,a=qe(o,s.shape)[0],c=()=>{const h=s.shape,d=i.size,m=h.slice(0,a),f=m.length,b=h.slice(o,h.length).slice(1),w=b.length,L=ZA(0,f),x=ZA(f+1,f+1+w),v=QA([m,[d],b]),N=K(e,v),O=K(i,[d]),E=QA([[f],L,x]),k=Ye(N,E);let F=Gb(k,O,s.shape[a]);const U=sh(E);return F=Ye(F,U),F};return{x:c,indices:()=>i}}};function ZA(e,t){const n=[];for(let s=e;s{const[n,s]=t;return{a:()=>et(n),b:()=>et(s)}}};const rP={kernelName:xl,gradFunc:e=>({x:()=>Ae(e,"float32")})};const oP={kernelName:Tl,gradFunc:e=>({x:()=>et(e)})};const aP={kernelName:Al,gradFunc:e=>({x:()=>et(e)})};const cP={kernelName:vl,gradFunc:e=>({x:()=>et(e)})};const lP={kernelName:Cl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,be(n,1))}}};const hP={kernelName:Nl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Ae(n,"float32"))}}};const uP={kernelName:oy,inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:i}=n;return{logits:()=>{const o=!0,a=Is(s);return Re(e,X($e(e,i,o),a))}}}};function dP(e,t,n,s=5,i=1,o=1,a=.5){const c=m=>m.LRNGrad(n,e,t,s,i,o,a),h={x:e,y:t,dy:n},d={depthRadius:s,bias:i,alpha:o,beta:a};return G.runKernelFunc(c,h,null,Gx,d)}const 
pP=z({localResponseNormalizationBackprop_:dP});const mP={kernelName:ay,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{depthRadius:o,bias:a,alpha:c,beta:h}=n;return{x:()=>pP(s,i,e,o,a,c,h)}}};function ev(e,t,n,s){return t.rank{const i=X(e,Ae(Xs(n,t),e.dtype));return i}}}const tv={kernelName:Rl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:i}=s,o=t[0],a=t[1],c=qe(i,o.shape),h=ev(e,a,o,c);return{x:()=>h.x()}}};const fP={kernelName:cy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,Ae(Zi(n,s),"float32")),o=()=>X(e,Ae(ph(n,s),"float32"));return{a:i,b:o}}};function gP(e,t,n,s,i,o=[1,1,1],a,c){const h=W(e,"dy","maxPool3dBackprop"),d=W(t,"input","maxPool3dBackprop"),m=W(n,"output","maxPool3dBackprop");let f=h,b=d,w=m,L=!1;d.rank===4&&(L=!0,f=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]]),b=K(d,[1,d.shape[0],d.shape[1],d.shape[2],d.shape[3]]),w=K(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),A(f.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${f.rank}.`),A(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),A(w.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${w.rank}.`),A(cn(i,o),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${i} and dilations '${o}'`),c!=null&&A(Le(a),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${a}.`);const x=E=>{const k=oh(b.shape,s,i,o,a,c);return E.maxPool3dBackprop(f,b,w,k)},v={dy:f,input:b,output:w},N={filterSize:s,strides:i,dilations:o,pad:a,dimRoundingMode:c},O=G.runKernelFunc(x,v,null,Yx,N);return L?K(O,[O.shape[1],O.shape[2],O.shape[3],O.shape[4]]):O}const yP=z({maxPool3dBackprop_:gP});const bP={kernelName:ly,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,dilations:c,pad:h,dimRoundingMode:d}=n,m=c==null?[1,1,1]:c;return{x:()=>yP(e,s,i,o,a,m,h,d)}}};function wP(e,t,n,s,i,o,a){const c=W(e,"dy","maxPoolBackprop"),h=W(t,"input","maxPoolBackprop"),d=W(n,"output","maxPoolBackprop");A(h.rank===c.rank,()=>`Rank of input (${h.rank}) does not match rank of dy (${c.rank})`),A(c.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${c.rank}.`),A(h.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${h.rank}.`),a!=null&&A(Le(o),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=w=>{const L=Un(h.shape,s,i,1,o,a);return w.maxPoolBackprop(c,h,d,L)},f={dy:c,input:h,output:d},b={filterSize:s,strides:i,pad:o,dimRoundingMode:a};return G.runKernelFunc(m,f,null,bd,b)}const LP=z({maxPoolBackprop_:wP});const SP={kernelName:Ol,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,pad:c}=n;return{x:()=>LP(e,s,i,o,a,c)}}};const IP={kernelName:uy,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:i}=s,[o,a]=t,c=qe(i,o.shape),h=ev(e,a,o,c);return{x:()=>h.x()}}};const xP={kernelName:dy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,Ae(Ur(n,s),"float32")),o=()=>X(e,Ae(xs(n,s),"float32"));return{a:i,b:o}}};const TP={kernelName:El,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>tt(e,o,s.shape)}}};const AP={kernelName:py,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=pn(n.shape,i);return 
c.length>0?K($e(e,c),n.shape):e},a=()=>{const c=X(e,Ht(Ma(We(n,s)))),h=pn(s.shape,i);return h.length>0?K($e(c,h),s.shape):c};return{a:o,b:a}}};const vP={kernelName:Ta,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=X(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{const c=X(e,Ae(n,"float32")),h=pn(s.shape,i);return h.length>0?K($e(c,h),s.shape):c};return{a:o,b:a}}};const NP={kernelName:my,gradFunc:e=>({x:()=>Ht(e)})};const CP={kernelName:yy,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>dt(n.shape,"float32")}}};const RP={kernelName:gy,gradFunc:e=>({x:()=>et(e)})};const nv={kernelName:Id,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>tt(e,o,s.shape)}}};const OP={kernelName:by,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,i]=t,o=n,a=s,c=nt(o.shape,a.shape),h=()=>{const m=Ae(a,"float32");let f=X(e,X(m,Zs(o,Re(m,Ce(1)))));const b=pn(o.shape,c);return b.length>0&&(f=$e(f,b)),K(f,o.shape)},d=()=>{const m=xs(o,0),f=Bn(m,cs(o),et(o));let b=X(e,X(i,f));const w=pn(a.shape,c);return w.length>0&&(b=$e(b,w)),K(b,a.shape)};return{a:h,b:d}}};const EP={kernelName:xd,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,i=xs(n,0);return{x:()=>Bn(i,e,X(e,s)),alpha:()=>{let o=Bn(i,et(e),X(e,n));const a=pn(s.shape,e.shape);return a.length>0&&(o=$e(o,a)),K(o,s.shape)}}}};const DP={kernelName:kl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Ht(At(n)))}}};const kP={kernelName:Wl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Ur(n,6),ja(n));return{x:()=>X(e,Ae(s,"float32"))}}};const FP={kernelName:Fl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ae(ja(n),"float32"))}}};const _P={kernelName:_l,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>K(e,n.shape)}}};const WP={kernelName:Ly,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeBilinearBackprop(e,s,h)},o={images:s},a=()=>G.runKernelFunc(i,o,null,Kx,n);return{images:a}}};const $P={kernelName:wy,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeNearestNeighborBackprop(e,s,h)},o={images:s},a=()=>G.runKernelFunc(i,o,null,jx,n);return{images:a}}};const UP={kernelName:Sy,gradFunc:(e,t,n)=>{const{dims:s}=n,i=qe(s,e.shape);return{x:()=>Ts(e,i)}}};const BP={kernelName:$l,gradFunc:e=>({x:()=>et(e)})};const MP={kernelName:Ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Ht(We(e,X(Zs(n,1.5),2)))}}};const PP={kernelName:Iy,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>Ae(et(n),"float32"),t:()=>X(e,Ae(n,e.dtype)),e:()=>X(e,Ae(mh(n),e.dtype))}}};const zP={kernelName:Bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=xs(n,Ce(0)),i=Ce(Wp),o=Ce($p),a=X(e,o),c=X(X(e,i),Is(Ae(n,"float32")));return Bn(s,a,c)}}}};const VP={kernelName:zl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(n,Re(Ce(1),n)))}}};const GP={kernelName:Pl,gradFunc:e=>({x:()=>et(e)})};const YP={kernelName:Aa,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(hh(Ae(n,"float32")),e)}}};const HP={kernelName:Ml,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(op(Ae(n,"float32")),e)}}};const qP={kernelName:Ad,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:i,size:o}=n,a=s.shape,[c,h]=Kd(s,i,o),d=[];for(let m=0;mvi(e,d)}}};const 
jP={kernelName:Ay,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:i}=n,o=!0,a=X(e,s);return{logits:()=>Re(a,X($e(a,[i],o),s))}}};const KP={kernelName:Vl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ti(n))}}};const sv={kernelName:vd,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:i}=n;return{x:()=>ch(e,s,i)}}};const iv={kernelName:Ty,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>Yt(e,s)}}};const XP={kernelName:Gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,X(Nn(Ae(n,"float32")),2))}}};const JP={kernelName:Nd,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(Ae(n,"float32"),2))}}};const ZP={kernelName:va,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=Ce(2),o=()=>X(e,X(i,Re(n,s))),a=()=>X(e,X(i,Re(s,n)));return{a:o,b:a}}};const QP={kernelName:ql,gradFunc:e=>({x:()=>et(e)})};const ez={kernelName:Na,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const h=pn(n.shape,i);return h.length>0&&(c=$e(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=pn(s.shape,i);return h.length>0&&(c=$e(c,h)),K(Ht(c),s.shape)};return{a:o,b:a}}};const tz={kernelName:xy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,i=s.shape.slice(),{axis:o}=n,a=qe(o,s.shape);a.forEach(d=>{i[d]=1});const c=K(e,i),h=X(c,Js(s.shape,"float32"));return{x:()=>h}}};const nz={kernelName:Ca,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,At(hh(n)))}}};const sz={kernelName:Yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Re(Ce(1),At(n)),e)}}};const iz={kernelName:vy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:i}=n,o=()=>{let a=et(s);if(s.rank===1)for(let c=0;c{const s=n,{perm:i}=s,o=sh(i);return{x:()=>Ye(e,o)}}};const oz={kernelName:Ny,gradFunc:(e,t,n)=>{const s=n,{axis:i}=s;return{value:()=>es(e,i)}}};const az={kernelName:Cy,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t,s=()=>cz(e,n);return{x:s}}};function cz(e,t){const n=$s(t,et(t)),s=Pa(e,n);let i=Zi(t,Ce(0,"int32"));const o=s.rank-i.rank;for(let c=0;c({x:()=>et(e)})};const hz=[mM,fM,gM,yM,bM,wM,LM,SM,IM,xM,TM,AM,CM,EM,DM,kM,FM,_M,WM,$M,UM,MM,BM,VM,GM,YM,HM,qM,jM,KM,XM,JM,ZM,QM,tP,eP,nP,sP,iP,rP,oP,aP,cP,lP,hP,uP,mP,tv,tv,fP,bP,SP,IP,xP,TP,AP,vP,NP,CP,RP,nv,nv,OP,EP,DP,kP,FP,_P,WP,$P,UP,BP,MP,PP,zP,VP,GP,YP,HP,qP,jP,KP,sv,sv,iv,iv,XP,ZP,JP,QP,ez,tz,nz,sz,iz,rz,oz,az,lz];for(const e of hz)eT(e);ee.prototype.abs=function(){return this.throwIfDisposed(),dn(this)};ee.prototype.acos=function(){return this.throwIfDisposed(),ob(this)};ee.prototype.acosh=function(){return this.throwIfDisposed(),ab(this)};ee.prototype.addStrict=function(e){return this.throwIfDisposed(),SA(this,e)};ee.prototype.add=function(e){return this.throwIfDisposed(),be(this,e)};ee.prototype.all=function(e,t){return this.throwIfDisposed(),Qd(this,e,t)};ee.prototype.any=function(e,t){return this.throwIfDisposed(),ih(this,e,t)};ee.prototype.argMax=function(e){return this.throwIfDisposed(),rh(this,e)};ee.prototype.argMin=function(e){return this.throwIfDisposed(),lb(this,e)};ee.prototype.asScalar=function(){return this.throwIfDisposed(),A(this.size===1,()=>"The array must have only 1 element."),K(this,[])};ee.prototype.asType=function(e){return this.throwIfDisposed(),Ae(this,e)};ee.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};ee.prototype.as2D=function(e,t){return this.throwIfDisposed(),K(this,[e,t])};ee.prototype.as3D=function(e,t,n){return this.throwIfDisposed(),K(this,[e,t,n])};ee.prototype.as4D=function(e,t,n,s){return 
this.throwIfDisposed(),K(this,[e,t,n,s])};ee.prototype.as5D=function(e,t,n,s,i){return this.throwIfDisposed(),K(this,[e,t,n,s,i])};ee.prototype.asin=function(){return this.throwIfDisposed(),hb(this)};ee.prototype.asinh=function(){return this.throwIfDisposed(),ub(this)};ee.prototype.atan=function(){return this.throwIfDisposed(),db(this)};ee.prototype.atan2=function(e){return this.throwIfDisposed(),pb(this,e)};ee.prototype.atanh=function(){return this.throwIfDisposed(),mb(this)};ee.prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),ah(this,e,t,n,s)};ee.prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),ch(this,e,t)};ee.prototype.batchNorm=function(e,t,n,s,i){return this.throwIfDisposed(),No(this,e,t,n,s,i)};ee.prototype.broadcastTo=function(e){return this.throwIfDisposed(),lh(this,e)};ee.prototype.cast=function(e){return this.throwIfDisposed(),Ae(this,e)};ee.prototype.ceil=function(){return this.throwIfDisposed(),bb(this)};ee.prototype.clipByValue=function(e,t){return this.throwIfDisposed(),Jn(this,e,t)};ee.prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof ee&&(e=[e]),Yt([this,...e],t)};ee.prototype.conv1d=function(e,t,n,s,i,o){return this.throwIfDisposed(),ip(this,e,t,n,s,i,o)};ee.prototype.conv2dTranspose=function(e,t,n,s,i){return this.throwIfDisposed(),rp(this,e,t,n,s,i)};ee.prototype.conv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Ji(this,e,t,n,s,i,o)};ee.prototype.cos=function(){return this.throwIfDisposed(),hh(this)};ee.prototype.cosh=function(){return this.throwIfDisposed(),op(this)};ee.prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),ap(this,e,t,n)};ee.prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),Sb(this,e,t)};ee.prototype.depthwiseConv2D=function(e,t,n,s,i,o){return un("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};ee.prototype.depthwiseConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};ee.prototype.dilation2d=function(e,t,n,s,i){return this.throwIfDisposed(),Ib(this,e,t,n,s,i)};ee.prototype.divNoNan=function(e){return this.throwIfDisposed(),xb(this,e)};ee.prototype.divStrict=function(e){return this.throwIfDisposed(),IA(this,e)};ee.prototype.div=function(e){return this.throwIfDisposed(),We(this,e)};ee.prototype.dot=function(e){return this.throwIfDisposed(),tA(this,e)};ee.prototype.elu=function(){return this.throwIfDisposed(),Ua(this)};ee.prototype.equalStrict=function(e){return this.throwIfDisposed(),fA(this,e)};ee.prototype.equal=function(e){return this.throwIfDisposed(),Xs(this,e)};ee.prototype.erf=function(){return this.throwIfDisposed(),Tb(this)};ee.prototype.exp=function(){return this.throwIfDisposed(),Is(this)};ee.prototype.expandDims=function(e){return this.throwIfDisposed(),Zn(this,e)};ee.prototype.expm1=function(){return this.throwIfDisposed(),Ab(this)};ee.prototype.fft=function(){return this.throwIfDisposed(),Lh(this)};ee.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};ee.prototype.floor=function(){return this.throwIfDisposed(),Ma(this)};ee.prototype.floorDiv=function(e){return this.throwIfDisposed(),Zd(this,e)};ee.prototype.gather=function(e,t){return this.throwIfDisposed(),Pa(this,e,t)};ee.prototype.greaterEqualStrict=function(e){return this.throwIfDisposed(),gA(this,e)};ee.prototype.greaterEqual=function(e){return this.throwIfDisposed(),Zi(this,e)};ee.prototype.greaterStrict=function(e){return 
this.throwIfDisposed(),yA(this,e)};ee.prototype.greater=function(e){return this.throwIfDisposed(),xs(this,e)};ee.prototype.ifft=function(){return this.throwIfDisposed(),qa(this)};ee.prototype.irfft=function(){return this.throwIfDisposed(),xp(this)};ee.prototype.isFinite=function(){return this.throwIfDisposed(),sA(this)};ee.prototype.isInf=function(){return this.throwIfDisposed(),iA(this)};ee.prototype.isNaN=function(){return this.throwIfDisposed(),rA(this)};ee.prototype.leakyRelu=function(e){return this.throwIfDisposed(),lp(this,e)};ee.prototype.lessEqualStrict=function(e){return this.throwIfDisposed(),bA(this,e)};ee.prototype.lessEqual=function(e){return this.throwIfDisposed(),Ur(this,e)};ee.prototype.lessStrict=function(e){return this.throwIfDisposed(),wA(this,e)};ee.prototype.less=function(e){return this.throwIfDisposed(),ph(this,e)};ee.prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),Nb(this,e,t,n,s)};ee.prototype.logSigmoid=function(){return this.throwIfDisposed(),aA(this)};ee.prototype.logSoftmax=function(e){return this.throwIfDisposed(),dp(this,e)};ee.prototype.logSumExp=function(e,t){return this.throwIfDisposed(),Rb(this,e,t)};ee.prototype.log=function(){return this.throwIfDisposed(),cs(this)};ee.prototype.log1p=function(){return this.throwIfDisposed(),hp(this)};ee.prototype.logicalAnd=function(e){return this.throwIfDisposed(),Us(this,e)};ee.prototype.logicalNot=function(){return this.throwIfDisposed(),mh(this)};ee.prototype.logicalOr=function(e){return this.throwIfDisposed(),pp(this,e)};ee.prototype.logicalXor=function(e){return this.throwIfDisposed(),cA(this,e)};ee.prototype.matMul=function(e,t,n){return this.throwIfDisposed(),ct(this,e,t,n)};ee.prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),fh(this,e,t,n,s)};ee.prototype.max=function(e,t){return this.throwIfDisposed(),Qn(this,e,t)};ee.prototype.maximumStrict=function(e){return this.throwIfDisposed(),xA(this,e)};ee.prototype.maximum=function(e){return this.throwIfDisposed(),$s(this,e)};ee.prototype.mean=function(e,t){return this.throwIfDisposed(),qt(this,e,t)};ee.prototype.min=function(e,t){return this.throwIfDisposed(),Va(this,e,t)};ee.prototype.minimumStrict=function(e){return this.throwIfDisposed(),TA(this,e)};ee.prototype.minimum=function(e){return this.throwIfDisposed(),Oo(this,e)};ee.prototype.mirrorPad=function(e,t){return this.throwIfDisposed(),Eb(this,e,t)};ee.prototype.modStrict=function(e){return this.throwIfDisposed(),AA(this,e)};ee.prototype.mod=function(e){return this.throwIfDisposed(),mp(this,e)};ee.prototype.mulStrict=function(e){return this.throwIfDisposed(),vA(this,e)};ee.prototype.mul=function(e){return this.throwIfDisposed(),X(this,e)};ee.prototype.neg=function(){return this.throwIfDisposed(),Ht(this)};ee.prototype.norm=function(e,t,n){return this.throwIfDisposed(),vp(this,e,t,n)};ee.prototype.notEqualStrict=function(e){return this.throwIfDisposed(),LA(this,e)};ee.prototype.notEqual=function(e){return this.throwIfDisposed(),Br(this,e)};ee.prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),To(this,e,t,n)};ee.prototype.onesLike=function(){return this.throwIfDisposed(),Fn(this)};ee.prototype.pad=function(e,t){return this.throwIfDisposed(),vi(this,e,t)};ee.prototype.pool=function(e,t,n,s,i){return this.throwIfDisposed(),uA(this,e,t,n,s,i)};ee.prototype.powStrict=function(e){return this.throwIfDisposed(),NA(this,e)};ee.prototype.pow=function(e){return this.throwIfDisposed(),Zs(this,e)};ee.prototype.prelu=function(e){return 
this.throwIfDisposed(),yh(this,e)};ee.prototype.prod=function(e,t){return this.throwIfDisposed(),gp(this,e,t)};ee.prototype.reciprocal=function(){return this.throwIfDisposed(),_b(this)};ee.prototype.relu=function(){return this.throwIfDisposed(),Ni(this)};ee.prototype.relu6=function(){return this.throwIfDisposed(),Wb(this)};ee.prototype.reshapeAs=function(e){return this.throwIfDisposed(),K(this,e.shape)};ee.prototype.reshape=function(e){return this.throwIfDisposed(),K(this,e)};ee.prototype.resizeBilinear=function(e,t){return this.throwIfDisposed(),PA(this,e,t)};ee.prototype.resizeNearestNeighbor=function(e,t){return this.throwIfDisposed(),zA(this,e,t)};ee.prototype.reverse=function(e){return this.throwIfDisposed(),Ts(this,e)};ee.prototype.rfft=function(){return this.throwIfDisposed(),Sh(this)};ee.prototype.round=function(){return this.throwIfDisposed(),$b(this)};ee.prototype.rsqrt=function(){return this.throwIfDisposed(),yp(this)};ee.prototype.selu=function(){return this.throwIfDisposed(),bp(this)};ee.prototype.separableConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Ub(this,e,t,n,s,i,o)};ee.prototype.sigmoid=function(){return this.throwIfDisposed(),Ti(this)};ee.prototype.sign=function(){return this.throwIfDisposed(),Bb(this)};ee.prototype.sin=function(){return this.throwIfDisposed(),wp(this)};ee.prototype.sinh=function(){return this.throwIfDisposed(),Lp(this)};ee.prototype.slice=function(e,t){return this.throwIfDisposed(),tt(this,e,t)};ee.prototype.softmax=function(e){return this.throwIfDisposed(),Fo(this,e)};ee.prototype.softplus=function(){return this.throwIfDisposed(),za(this)};ee.prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),gh(this,e,t)};ee.prototype.split=function(e,t){return this.throwIfDisposed(),hs(this,e,t)};ee.prototype.sqrt=function(){return this.throwIfDisposed(),Nn(this)};ee.prototype.square=function(){return this.throwIfDisposed(),At(this)};ee.prototype.squaredDifference=function(e){return this.throwIfDisposed(),Ih(this,e)};ee.prototype.squaredDifferenceStrict=function(e){return this.throwIfDisposed(),CA(this,e)};ee.prototype.squeeze=function(e){return this.throwIfDisposed(),Mr(this,e)};ee.prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof ee?[this,e]:[this,...e];return es(n,t)};ee.prototype.step=function(e){return this.throwIfDisposed(),ja(this,e)};ee.prototype.stridedSlice=function(e,t,n,s,i,o,a,c){return this.throwIfDisposed(),Pb(this,e,t,n,s,i,o,a,c)};ee.prototype.subStrict=function(e){return this.throwIfDisposed(),RA(this,e)};ee.prototype.sub=function(e){return this.throwIfDisposed(),Re(this,e)};ee.prototype.sum=function(e,t){return this.throwIfDisposed(),$e(this,e,t)};ee.prototype.tan=function(){return this.throwIfDisposed(),zb(this)};ee.prototype.tanh=function(){return this.throwIfDisposed(),$a(this)};ee.prototype.tile=function(e){return this.throwIfDisposed(),$r(this,e)};ee.prototype.toBool=function(){return this.throwIfDisposed(),Ae(this,"bool")};ee.prototype.toFloat=function(){return this.throwIfDisposed(),Ae(this,"float32")};ee.prototype.toInt=function(){return this.throwIfDisposed(),Ae(this,"int32")};ee.prototype.topk=function(e,t){return this.throwIfDisposed(),Vb(this,e,t)};ee.prototype.transpose=function(e){return this.throwIfDisposed(),Ye(this,e)};ee.prototype.unique=function(e){return this.throwIfDisposed(),Tp(this,e)};ee.prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Gb(this,e,t)};ee.prototype.unstack=function(e){return 
this.throwIfDisposed(),Qs(this,e)};ee.prototype.where=function(e,t){return this.throwIfDisposed(),Bn(e,this,t)};ee.prototype.zerosLike=function(){return this.throwIfDisposed(),et(this)};let Up;function mn(){return Up==null&&(Up=GT().epsilon()),Up}function Cee(e){Up=e}function ei(){return"channelsLast"}class nr extends Error{constructor(e){super(e);Object.setPrototypeOf(this,nr.prototype)}}class ti extends Error{constructor(e){super(e);Object.setPrototypeOf(this,ti.prototype)}}class q extends Error{constructor(e){super(e);Object.setPrototypeOf(this,q.prototype)}}class Pe extends Error{constructor(e){super(e);Object.setPrototypeOf(this,Pe.prototype)}}class rv extends Error{constructor(e){super(e);Object.setPrototypeOf(this,rv.prototype)}}class uz extends Error{constructor(e){super(e);Object.setPrototypeOf(this,uz.prototype)}}function $o(e,t){if(Array.isArray(e)){let n=[];for(let s=0;sn.toUpperCase())}let Bs={};function dw(e){if(e==null)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function pw(e){if(e==null||typeof e!="object")return;if(Array.isArray(e))e.forEach(t=>pw(t));else{const t=Object.keys(e);for(const n of t){const s=e[n];s!=null&&typeof s=="object"&&(!Array.isArray(s)&&s.type==="ndarray"&&typeof s.value=="number"?e[n]=s.value:pw(s))}}}function kh(e,t={},n={},s="object",i=!1){if(typeof e=="string"){const o=e;let a;if(o in n)a=n[o];else if(o in Bs)a=Bs[o];else if(a=t[o],a==null)throw new q(`Unknown ${s}: ${e}. This may be due to one of the following reasons: 1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. -2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return a}else{const o=e;if(o.className==null||o.config==null)throw new j(`${s}: Improper config format: ${JSON.stringify(o)}. -'className' and 'config' must set.`);const a=o.className;let c,h;if(a in n?[c,h]=n[a]:a in $s?[c,h]=$s.className:a in t&&([c,h]=t[a]),c==null)throw new j(`Unknown ${s}: ${a}. This may be due to one of the following reasons: +2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return a}else{const o=e;if(o.className==null||o.config==null)throw new q(`${s}: Improper config format: ${JSON.stringify(o)}. +'className' and 'config' must set.`);const a=o.className;let c,h;if(a in n?[c,h]=n[a]:a in Bs?[c,h]=Bs.className:a in t&&([c,h]=t[a]),c==null)throw new q(`Unknown ${s}: ${a}. This may be due to one of the following reasons: 1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. -2. 
The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(h!=null){const d={};for(const w of Object.keys($s))d[w]=$s[w];for(const w of Object.keys(n))d[w]=n[w];const m=o.config;m.customObjects=d;const y=Object.assign({},$s);for(const w of Object.keys(n))$s[w]=n[w];Vb(o.config);const b=h(c,o.config,n,i);return $s=Object.assign({},y),b}else{const d=Object.assign({},$s);for(const y of Object.keys(n))$s[y]=n[y];const m=new c(o.config);return $s=Object.assign({},d),m}}}function OP(e,t){return et?1:0}function cp(e,t){return-1*OP(e,t)}function YZ(e){switch(e){case"float32":return"float32";default:throw new j(`Invalid dtype: ${e}`)}}function HZ(e,t){if(e==null||t==null)return e===t;if(e.length!==t.length)return!1;for(let n=0;n=0),xs(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every(i=>typeof i===t)}function pn(e,t){Array.isArray(e)?(k(e.length>0,()=>`${t} is unexpectedly an empty array.`),e.forEach((n,s)=>pn(n,`element ${s+1} of ${t}`))):k(Number.isInteger(e)&&e>0,()=>`Expected ${t} to be a positive integer, but got ${GA(e)}.`)}function GA(e){return e===null?"null":Array.isArray(e)?"["+e.map(t=>GA(t)).join(",")+"]":typeof e=="string"?`"${e}"`:`${e}`}function DP(e,t){let n=Vn(),s;const i=(...o)=>{const a=Vn();return a-n0,"arrayOfValues is empty");for(const t of e)xs(Array.isArray(t),"one of the values is not an array"),xs(t.length>0,"one of the values is empty");return e.reduce((t,n)=>t.length===0?n.map(s=>[s]):n.map(s=>t.map(i=>[...i,s])).reduce((s,i)=>s.concat(i),[]),[])}function Hb(e,t){return ee(()=>Ln(Ue(X(e,e),t,!0)))}class wh extends go{getConfig(){return{}}}class qb extends wh{constructor(e){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=Hb(e,this.axis),n=Yn(t,0,this.maxValue);return X(e,_e(n,be(an(),t)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}qb.className="MaxNorm",me(qb);class jb extends wh{constructor(e){super();this.defaultAxis=0,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>_e(e,be(an(),Hb(e,this.axis))))}getConfig(){return{axis:this.axis}}}jb.className="UnitNorm",me(jb);class Kb extends wh{apply(e){return Ri(e)}}Kb.className="NonNeg",me(Kb);class Xb extends wh{constructor(e){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=e.minValue!=null?e.minValue:this.defaultMinValue,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.rate=e.rate!=null?e.rate:this.defaultRate,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return ee(()=>{const t=Hb(e,this.axis),n=be(X(this.rate,Yn(t,this.minValue,this.maxValue)),X(1-this.rate,t));return X(e,_e(n,be(an(),t)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}Xb.className="MinMaxNorm",me(Xb);const YA={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function cn(e){return Gb(e)}function HA(e,t={}){return bh(e,ks.getMap().classNameMap,t,"constraint")}function ln(e){if(e==null)return null;if(typeof e=="string"){const t=e in YA?YA[e]:e,n={className:t,config:{}};return HA(n)}else return e instanceof wh?e:HA(e)}function kP(e){return new qb(e)}function FP(e){return new jb(e)}function _P(){return new Kb}function WP(e){return new Xb(e)}var $P=Object.freeze({__proto__:null,maxNorm:kP,unitNorm:FP,nonNeg:_P,minMaxNorm:WP});const 
UP=["channelsFirst","channelsLast"],BP=["valid","same","causal"],MP=["max","avg"],PP=["sum","mul","concat","ave"],jZ=["temporal"];const Ga=new Map;function Gt(e){za(UP,"DataFormat",e)}function Ts(e){za(BP,"PaddingMode",e)}function qA(e){za(MP,"PoolMode",e)}const Lh=[],jA="/";function Do(e,t){Lh.push(e);try{const n=t();return Lh.pop(),n}catch(n){throw Lh.pop(),n}}function zP(){return Lh.length===0?"":Lh.join(jA)+jA}function KA(e){if(!JA(e))throw new Error("Not a valid tensor name: '"+e+"'");return zP()+e}function XA(e){if(!JA(e))throw new Error("Not a valid tensor name: '"+e+"'");Ga.has(e)||Ga.set(e,0);const t=Ga.get(e);if(Ga.set(e,Ga.get(e)+1),t>0){const n=`${e}_${t}`;return Ga.set(n,1),n}else return e}const GP=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function JA(e){return!!e.match(GP)}function VP(e){return e===parseInt(e.toString(),10)}function Ur(e,t,n){t==null&&(t=0),n==null&&(n=e.length);let s=1;for(let i=t;ii-o),n=Math.floor((t.length-1)/2),s=Math.ceil((t.length-1)/2);return n===s?t[n]:(t[n]+t[s])/2}function si(e,t){if(t0?t.reduce((n,s)=>n*s):1}function Sh(e,t){return e.asType(t)}function Ih(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function qP(e,t){return ee(()=>{if(e.shape.length!==2)throw new j(`repeat() expects a rank-2 tensor, but received a rank-${e.shape.length} tensor.`);const n=Ih(e,1);return Qb(n,[1,t,1])})}function jP(e){const t=[Ur(e.shape)];return e.reshape(t)}function KP(e){if(e.rank<=1)throw new j(`batchFlatten requires a minimum rank of 2. Got rank: ${e.rank}.`);const t=[e.shape[0],Ur(e.shape,1)];return e.reshape(t)}function ko(e,t,n){return ee(()=>{switch(e.rank){case 1:return Vd(e,t,n);case 2:return yb(e,[t,0],[n,e.shape[1]]);case 3:return Yd(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return ih(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return nt(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return nt(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new j(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Jb(e,t,n){return ee(()=>{switch(e.rank){case 1:return Vd(e,t,n);case 2:return yb(e,[0,t],[e.shape[0],n]);case 3:return Yd(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return ih(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function hp(e,t,n,s){return ee(()=>{switch(e.rank){case 1:return Vd(e,t,n);case 2:switch(s){case 1:return ko(e,t,n);case 2:return Jb(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return ko(e,t,n);case 2:return Yd(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return Jb(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return ko(e,t,n);case 2:return ih(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return ih(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return Jb(e,t,n);default:throw new j(`The axis is not within the rank of the tensor ${s}`)}default:throw new j(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Zb(e,t=-1){let n;return t<0&&(n=e[0].rank,n!==0?t=n:t=0),t===e[0].rank&&(t=-1),Mt(e,t)}function QA(e,t){switch(e.rank){case 1:return ET([e,t]);case 2:return DT([e,t],0);case 3:return kT([e,t],0);case 4:return FT([e,t],0);default:throw new j(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function 
Qb(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new j(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return Er(e,t)}function up(e,t=0,n=1,s,i){return ub(e,t,n,s,i)}function Ei(e,t,n,s){if(e.rank<2||t.rank<2)throw new ze(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3){const i=e.shape.slice(-1)[0],o=t.shape.slice(-2)[0];if(i!==o)throw new ze(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${t.shape}`)}if(e.rank===2&&t.rank===2){const i=!1,o=!1;return ep({a:e,b:t,transposeA:i,transposeB:o,bias:s?ew(e.rank,s,ti()):null,activation:n})}else{const i=e.shape.slice(),o=i.pop();e=e.reshape([-1,o]);const a=t.shape.slice(),c=a.pop(),h=a.pop(),d=[...a,c],m=Array.from({length:t.rank},(L,T)=>T===0?t.rank-2:T<=t.rank-2?T-1:T);t=t.transpose(m).reshape([h,-1]);const y=[...i,...d],b=!1,w=!1;return ep({a:e,b:t,transposeA:b,transposeB:w,bias:s?ew(e.rank,s,ti()):null,activation:n}).reshape(y)}}function tQ(e){return ee(()=>{const t=Qe(e),n=On(e);return _n(Zs(e,t),t,_n(Ss(e,Qe(e)),n,X(-1,n)))})}function nQ(e,t){return ee(()=>{if(e.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return e=e.toInt(),fo(e,t).toFloat()})}function ev(e,t,n){return ee(()=>(Array.isArray(t)?t=ns(t,"int32"):t=t.toInt(),Oa(e,t,n)))}function xh(e){return X(e,e)}function sQ(e,t){return ee(()=>{if(typeof t=="number"&&(t=Ne(Math.round(t),"int32")),t.dtype!=="int32")throw new ze(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return ei(e,t)})}function ew(e,t,n){const s=t.shape;if(t.rank!==1&&t.rank!==e)throw new j(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(e===5){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===4){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===3){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new j(`Unsupported input rank by biasAdd: ${t.rank}`)}function Di(e,t,n){return ee(()=>(n==null&&(n=ti()),Gt(n),e.add(ew(e.rank,t,n))))}function XP(e,t=1){if(t!==1)throw new ze(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return So(e)}function JP(e){return ee(()=>_e(e,sn(e).add(1)))}function tv(e,t,n,s){return ee(()=>mA(e,t,n,s))}function ZP(e){return ee(()=>{const t=be(.5,X(.2,e));return Yn(t,0,1)})}function Th(e,t,n=!1){return n?e():t()}const QP=["fanIn","fanOut","fanAvg"],ez=["normal","uniform","truncatedNormal"],iQ=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function tz(e){za(QP,"FanMode",e)}function nz(e){za(ez,"Distribution",e)}class Us extends go{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class tw extends Us{apply(e,t){return ct(e,t)}}tw.className="Zeros",me(tw);class dp extends Us{apply(e,t){return Qs(e,t)}}dp.className="Ones",me(dp);class nw extends Us{constructor(e){super();if(typeof e!="object")throw new j(`Expected argument of type ConstantConfig but 
got ${e}`);if(e.value===void 0)throw new j(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return ee(()=>X(Ne(this.value),Qs(e,t)))}getConfig(){return{value:this.value}}}nw.className="Constant",me(nw);class sw extends Us{constructor(e){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return vo(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}sw.className="RandomUniform",me(sw);class iw extends Us{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new ze(`randomNormal does not support dType ${t}.`);return up(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}iw.className="RandomNormal",me(iw);class rw extends Us{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new ze(`truncatedNormal does not support dType ${t}.`);return ch(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}rw.className="TruncatedNormal",me(rw);class ow extends Us{constructor(e){super();this.gain=e.gain!=null?e.gain:1}apply(e,t){return ee(()=>{if(e.length!==2||e[0]!==e[1])throw new j("Identity matrix initializer can only be used for 2D square matrices.");return X(this.gain,Ed(e[0]))})}getConfig(){return{gain:this.gain}}}ow.className="Identity",me(ow);function sz(e,t="channelsLast"){let n,s;if(Gt(t),e.length===2)n=e[0],s=e[1];else if([3,4,5].indexOf(e.length)!==-1){if(t==="channelsFirst"){const i=Ur(e,2);n=e[1]*i,s=e[0]*i}else if(t==="channelsLast"){const i=Ur(e,0,e.length-2);n=e[e.length-2]*i,s=e[e.length-1]*i}}else{const i=Ur(e);n=Math.sqrt(i),s=Math.sqrt(i)}return[n,s]}class Kn extends Us{constructor(e){super();if(e.scale<0)throw new j(`scale must be a positive float. 
Got: ${e.scale}`);this.scale=e.scale==null?1:e.scale,this.mode=e.mode==null?"fanIn":e.mode,tz(this.mode),this.distribution=e.distribution==null?"normal":e.distribution,nz(this.distribution),this.seed=e.seed}apply(e,t){const n=sz(e),s=n[0],i=n[1];let o=this.scale;if(this.mode==="fanIn"?o/=Math.max(1,s):this.mode==="fanOut"?o/=Math.max(1,i):o/=Math.max(1,(s+i)/2),this.distribution==="normal"){const a=Math.sqrt(o);if(t=t||"float32",t!=="float32"&&t!=="int32")throw new ze(`${this.getClassName()} does not support dType ${t}.`);return ch(e,0,a,t,this.seed)}else{const a=Math.sqrt(3*o);return vo(e,-a,a,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}Kn.className="VarianceScaling",me(Kn);class pp extends Kn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}pp.className="GlorotUniform",me(pp);class mp extends Kn{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}mp.className="GlorotNormal",me(mp);class fp extends Kn{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}fp.className="HeNormal",me(fp);class gp extends Kn{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}gp.className="HeUniform",me(gp);class yp extends Kn{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}yp.className="LeCunNormal",me(yp);class bp extends Kn{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return Kn.className}}bp.className="LeCunNormal",me(bp);class aw extends Us{constructor(e){super();if(this.DEFAULT_GAIN=1,this.gain=e.gain==null?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,this.seed!=null)throw new ze("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return ee(()=>{if(e.length<2)throw new ze("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const n=e[0]>e[1]?[e[1],e[0]]:e,s=up(n,0,1,"float32");let i=AA.gramSchmidt(s);return e[0]>e[1]&&(i=i.transpose()),X(this.gain,i)})}getConfig(){return{gain:this.gain,seed:this.seed}}}aw.className="Orthogonal",me(aw);const nv={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function sv(e,t={}){return bh(e,ks.getMap().classNameMap,t,"initializer")}function Vt(e){return Gb(e)}function Ft(e){if(typeof e=="string"){const t=e in nv?nv[e]:e;if(t==="GlorotNormal")return new mp;if(t==="GlorotUniform")return new pp;if(t==="HeNormal")return new fp;if(t==="HeUniform")return new gp;if(t==="LeCunNormal")return new yp;if(t==="LeCunUniform")return new bp;{const n={};return n.className=t,n.config={},sv(n)}}else return e instanceof Us?e:sv(e)}function iz(){return new tw}function rz(){return new dp}function oz(e){return new nw(e)}function az(e){return new sw(e)}function cz(e){return new iw(e)}function lz(e){return new rw(e)}function 
hz(e){return new ow(e)}function uz(e){return new Kn(e)}function dz(e){return new pp(e)}function pz(e){return new mp(e)}function mz(e){return new fp(e)}function fz(e){return new gp(e)}function gz(e){return new yp(e)}function yz(e){return new bp(e)}function bz(e){return new aw(e)}var wz=Object.freeze({__proto__:null,zeros:iz,ones:rz,constant:oz,randomUniform:az,randomNormal:cz,truncatedNormal:lz,identity:hz,varianceScaling:uz,glorotUniform:dz,glorotNormal:pz,heNormal:mz,heUniform:fz,leCunNormal:gz,leCunUniform:yz,orthogonal:bz});let Lz=0;function iv(){return Lz++}const wp={};function Lp(e=""){return e in wp||(wp[e]=0),wp[e]+=1,e+wp[e].toString()}function cw(e){return Array.isArray(e)&&Array.isArray(e[0])}function Sp(e){return e.length===0?[]:Array.isArray(e[0])?e:[e]}function je(e){let t;if(Array.isArray(e)){if(e.length!==1)throw new j(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function St(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(e.length===1)return e=e,e[0];throw new j(`Expected exactly 1 Shape; got ${e.length}`)}else return e}function Ip(e){let t=0;for(const n of e)n.shape.length===0?t+=1:t+=n.shape.reduce((s,i)=>s*i);return t}const rv="Variable";class ii{constructor(e,t="float32",n=rv,s=!0,i=null){this.dtype=t==null?"float32":t,this.shape=e.shape,this.id=iv(),n=n==null?rv:n,this.originalName=KA(n),this.name=XA(this.originalName),this.trainable_=s,this.constraint=i,this.val=KT(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),Sz(this.val,e),this.val.id!==e.id&&(this.val.assign(e),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function Sz(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}function rQ(e,t,n,s){return new ii(e,t,n,!0,s)}function oQ(e,t,n){return new ii(ct(e),t,n)}function aQ(e,t,n){return new ii(Qe(e),t,n)}function cQ(e,t,n){const s=Qs(e);return new ii(s,t,n)}function lQ(e,t,n){const s=On(e);return new ii(s,t,n)}function hQ(e,t,n){return new ii(Ed(e),t,n)}function uQ(e,t,n,s,i,o="randomUniform"){return new ii(vo(e,t,n,s),s,o)}function dQ(e,t=0,n=1,s,i,o="truncatedNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new ze(`randomNormal does not support dType ${s}.`);return new ii(ch(e,t,n,s,i),s,o)}function pQ(e,t=0,n=1,s,i,o="randomNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new ze(`randomNormalVariable does not support dType ${s}.`);return new ii(ub(e,t,n,s,i),s,o)}function mQ(e,t){return e.write(t)}function fQ(e,t){return e.write(be(e.read(),t))}function gQ(e,t){return e.write(Ce(e.read(),t))}function lw(e){return e.map(t=>t.read())}function hw(e){e.forEach(t=>{const n=t[0];n.write(t[1])})}function yQ(e,t){const n=t.map(i=>i.read()),s=ob(e,n);return t.map(i=>s.grads[i.name])}class mn{constructor(e){this.dtype=e.dtype,this.shape=e.shape,e.shape!=null?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class ri{constructor(e,t,n,s,i,o,a){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=i,this.outputTensorIndex=a,this.id=iv(),o!=null&&(this.originalName=KA(o),this.name=XA(this.originalName)),this.rank=t.length}}let Iz=0;class xp{constructor(e,t){this.callArgs=t,this.id=Iz++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const n of e.inboundLayers)n!=null&&n.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)t!=null?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let xz=0;class lt extends go{constructor(e={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=xz++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const n=this.getClassName();t=er(n)+"_"+Lp(n)}if(this.name=t,this.trainable_=e.trainable==null?!0:e.trainable,e.inputShape!=null||e.batchInputShape!=null){let n;if(e.batchInputShape!=null)n=e.batchInputShape;else if(e.inputShape!=null){let i=null;e.batchSize!=null&&(i=e.batchSize),n=[i].concat(e.inputShape)}this.batchInputShape=n;let s=e.dtype;s==null&&(s=e.inputDType),s==null&&(s="float32"),this.dtype=s}e.weights!=null?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(this.inboundNodes.length===0)throw new ni(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new j(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return 
jn(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return jn(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new Qi(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new Qi(`Layer ${this.name} is not connected, no input to return.`);return jn(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new Qi(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new Qi(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return jn(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(e=>e())}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach(t=>t.trainable=e),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(e=>e.trainable):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(e=>!e.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Nt(e),this.inputSpec==null||this.inputSpec.length===0)return;const t=Nt(this.inputSpec);if(e.length!==t.length)throw new j(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. 
Input received: ${e}`);for(let n=0;ni.maxNDim)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${i.maxNDim}, found ndim=${o}`);if(i.minNDim!=null&&o=0?a[h]:a[a.length+h];if(d!=null&&[d,null].indexOf(m)===-1)throw new j(`Input ${n} is incompatible with layer ${this.name}: expected axis ${h} of input shape to have value ${d} but got shape ${a}.`)}}if(i.shape!=null)for(let a=0;a{if(!this.built){this.assertInputCompatibility(e);const o=[];for(const a of Nt(e))o.push(a.shape);this.build(jn(o)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&i&&(this._refCount=1)}if(this.assertInputCompatibility(e),i){let o=this.call(e,t);const a=Nt(o),c=[];for(let h of a)n.indexOf(h)!==-1&&(h=h.clone()),c.push(h);if(o=jn(c),this.activityRegularizer!=null)throw new ze("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return o}else{const o=Tz(e),a=this.computeOutputShape(o);let c;const h=Az(e);if(this.warnOnIncompatibleInputShape(Array.isArray(e)?o[0]:o),a!=null&&a.length>0&&Array.isArray(a[0])?c=a.map((d,m)=>new ri(h,d,this,Nt(e),t,this.name,m)):c=new ri(h,a,this,Nt(e),t,this.name),this.addInboundNode(e,c,null,null,o,a,t),this._refCount++,this.activityRegularizer!=null)throw new ze("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}})}warnOnIncompatibleInputShape(e){if(this.batchInputShape==null)return;if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach((n,s)=>{n!=null&&e[s]!=null&&e[s]!==n&&(t=!0)}),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new Qi(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);e.indexOf(n)===-1&&e.push(n)}if(e.length===1){const t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&t.length===1?t[0]:t}else throw new Qi(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new ni(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Ip(this.weights)}build(e){this.built=!0}getWeights(e=!1){return lw(e?this.trainableWeights:this.weights)}setWeights(e){ee(()=>{const t=this.weights;if(t.length!==e.length)throw new j(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(t.length===0)return;const n=[],s=lw(t);for(let i=0;ii.apply(h.read())),o==null&&(o=!0),o?this._trainableWeights.push(h):this._nonTrainableWeights.push(h),h}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){if(e==null||Array.isArray(e)&&e.length===0)return;e=Nt(e),this._losses!==void 0&&this._losses!==null&&this.losses.push(...e)}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(t!=null)if(Array.isArray(t))t.forEach(n=>{if(n!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return t}addInboundNode(e,t,n,s,i,o,a=null){const c=Nt(e);t=Nt(t),n=Nt(n),s=Nt(s),i=Sp(i),o=Sp(o);const h=[],d=[],m=[];for(const y of c)h.push(y.sourceLayer),d.push(y.nodeIndex),m.push(y.tensorIndex);new xp({outboundLayer:this,inboundLayers:h,nodeIndices:d,tensorIndices:m,inputTensors:c,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:i,outputShapes:o},a);for(let y=0;ye.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return--this._refCount===0&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function Tz(e){e=Nt(e);const t=[];for(const n of e)t.push(n.shape);return jn(t)}function Az(e){return"float32"}function ov(e,t,n){if((t==null||n!=null&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),t.inboundNodes.length===0)return[e];{const s=t.inboundNodes[n];if(s.inboundLayers.length===0)return s.inputTensors;{const i=[];for(let o=0;o0){const i=await Promise.all(t);for(let o=0;obe(this.totals[s],X(i,n)));this.totals[s]=a,o!=null&&o.dispose()}}}async onEpochEnd(e,t){if(t!=null)for(const n of this.params.metrics){if(this.totals[n]==null)continue;typeof this.totals[n]=="number"?t[n]=this.totals[n]/this.seen:ee(()=>{const s=X(_e(1,this.seen),this.totals[n]);t[n]=s,this.totals[n].dispose(),Nn(t[n])})}}}class uv extends Ha{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){t==null&&(t={}),this.epoch.push(e);for(const n in t)this.history[n]==null&&(this.history[n]=[]),this.history[n].push(t[n])}async syncData(){const e=[],t=[],n=[];for(const i in this.history){const o=this.history[i];for(let a=0;anew dv(s,t))}class Bs{constructor(){}static registerCallbackConstructor(e,t){k(e>=0&&Number.isInteger(e),()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`),Bs.checkForDuplicate(t),Bs.constructors[e]==null&&(Bs.constructors[e]=[]),Bs.constructors[e].push(t)}static checkForDuplicate(e){for(const t in Bs.constructors){const n=Bs.constructors[+t];n.forEach(s=>{if(s===e)throw new j("Duplicate callback constructor.")})}}static clear(){Bs.constructors={}}static createCallbacks(e){const t=[];for(const n in Bs.constructors){const s=+n;e>=s&&t.push(...Bs.constructors[s])}return t.map(n=>new n)}}Bs.constructors={};function mv(e,t,n,s,i,o,a,c,h){const d=new uv,m=[new Nz,...Bs.createCallbacks(t)];e!=null&&m.push(...e),m.push(d);const y=new hv(m);return y.setParams({epochs:n,initialEpoch:s,samples:i,steps:o,batchSize:a,verbose:t,doValidation:c,metrics:h}),{callbackList:y,history:d}}function 
oi(e,t={},n=!1){return bh(e,ks.getMap().classNameMap,t,"layer",n)}function Tp(e,t){return ee(()=>{e.dtype!=="float32"&&(e=e.asType("float32"));const n=Ue(xh(e),t,!0),s=Xl(n.shape,an()),i=Ln(_s(n,s));return _e(e,i)})}function tr(e,t){return ee(()=>zt(xh(Ce(t,e)),-1))}function qa(e,t){return ee(()=>zt(sn(Ce(t,e)),-1))}function Pr(e,t){return ee(()=>{const n=Ce(e,t),s=Yn(sn(e),an(),Number.MAX_VALUE),i=sn(_e(n,s));return X(100,zt(i,-1))})}function uw(e,t){return ee(()=>{const n=Yn(t,an(),Number.MAX_VALUE),s=ts(be(1,n)),i=Yn(e,an(),Number.MAX_VALUE),o=ts(be(1,i));return zt(xh(Ce(s,o)),-1)})}function Cz(e,t){return ee(()=>{const n=_s(0,Ce(1,X(e,t)));return zt(xh(n),-1)})}function Rz(e,t){return ee(()=>{const n=_s(0,Ce(1,X(e,t)));return zt(n,-1)})}function Oz(e,t){return ee(()=>{const n=Ue(X(e,t),-1),s=qn(X(Ce(1,e),t),-1);return _s(0,be(1,Ce(s,n)))})}function Ez(e,t){return ee(()=>{const n=Math.log(2),s=Ce(t,e),i=Ce(be(s,Da(X(-2,s))),n);return zt(i,-1)})}function Ah(e,t,n=!1){return ee(()=>{if(n)t=No(t);else{const s=Ue(t,t.shape.length-1,!0);t=_e(t,s)}return t=Yn(t,an(),1-an()),Pt(Ue(X(e.toFloat(),ts(t)),t.shape.length-1))})}function Ap(e,t,n=!1){return ee(()=>{const s=Ra(jP(e)).toInt();t=Yn(t,an(),1-an());const i=t.shape,o=fo(s,i[i.length-1]).reshape(i);return Ah(o,t,n)})}function Dz(e,t){if(!ot(e.shape,t.shape))throw new j(`logits and labels must have the same shape, but got shapes ${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return ee(()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())})}function vp(e,t){return ee(()=>{let n;return n=Yn(t,an(),1-an()),n=ts(_e(n,Ce(1,n))),zt(Dz(e,n),-1)})}function dw(e,t){return ee(()=>{const n=Yn(e,an(),1),s=Yn(t,an(),1);return Ue(X(e,ts(_e(n,s))),-1)})}function kz(e,t){return ee(()=>{const n=ts(be(an(),t));return zt(Ce(t,X(e,n)),-1)})}function Np(e,t){return ee(()=>{const n=Tp(e,-1),s=Tp(t,-1),i=X(n,s);return Pt(Ue(i,-1))})}const bQ=tr,wQ=tr,LQ=qa,SQ=qa,IQ=Pr,xQ=Pr,TQ=uw,AQ=uw,vQ=dw,NQ=dw,CQ=Np,Cp={meanSquaredError:tr,meanAbsoluteError:qa,meanAbsolutePercentageError:Pr,meanSquaredLogarithmicError:uw,squaredHinge:Cz,hinge:Rz,categoricalHinge:Oz,logcosh:Ez,categoricalCrossentropy:Ah,sparseCategoricalCrossentropy:Ap,binaryCrossentropy:vp,kullbackLeiblerDivergence:dw,poisson:kz,cosineProximity:Np};function pw(e){if(typeof e=="string"){if(e in Cp)return Cp[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new j(t)}else return e}function mw(e,t){return ee(()=>{const n=X(.5,On(t)),s=Sh(Ss(t,n),e.dtype);return zt(Zs(e,s),-1)})}function fw(e,t){return ee(()=>Sh(Zs(zl(e,-1),zl(t,-1)),"float32"))}function fv(e,t){return ee(()=>Ws(e.equal(1),t.equal(1)).sum().cast("float32"))}function Fz(e,t){return ee(()=>Ws(e.equal(1),t.equal(0)).sum().cast("float32"))}function _z(e,t){return ee(()=>Ws(e.equal(0),t.equal(1)).sum().cast("float32"))}function gv(e,t){return ee(()=>{const n=fv(e,t),s=_z(e,t),i=n.add(s);return _n(Ss(i,0),n.div(i),0).cast("float32")})}function Wz(e,t){return ee(()=>{const n=fv(e,t),s=Fz(e,t),i=n.add(s);return _n(Ss(i,0),n.div(i),0).cast("float32")})}function yv(e,t){return vp(e,t)}function bv(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),t=t.argMax(-1),t.dtype!==e.dtype&&(t=t.asType(e.dtype)),Zs(e,t).asType("float32")}function RQ(e,t){throw new ze}function OQ(e,t){throw new ze}const $z=tr,Uz=tr,Bz=qa,Mz=qa,Pz=Pr,zz=Pr,gw=Ah,Gz=Np,wv=Ap,Rp={binaryAccuracy:mw,categoricalAccuracy:fw,precision:gv,categoricalCrossentropy:gw,sparseCategoricalCrossentropy:wv,mse:$z,MSE:Uz,mae:Bz,MAE:Mz,mape:Pz,MAPE:zz,cosine:Gz};function Vz(e){if(typeof e=="string"&&e in Rp)return Rp[e];if(typeof e!="string"&&e!=null)return e;throw new j(`Unknown metric ${e}`)}function Op(e){if(xs(e!==null,`Unknown LossOrMetricFn ${e}`),typeof e=="string")return e;{let t;for(const n of Object.keys(Cp))if(Cp[n]===e){t=n;break}if(t!==void 0)return t;for(const n of Object.keys(Rp))if(Rp[n]===e){t=n;break}return t!==void 0?t:e.name}}function Yz(e){const t={Adagrad:()=>Ro.adagrad(.01),Adadelta:()=>Ro.adadelta(1,.95,an()),Adam:()=>Ro.adam(.001,.9,.999,an()),Adamax:()=>Ro.adamax(.002,.9,.999,an(),0),RMSProp:()=>Ro.rmsprop(.001,.9,0,an()),SGD:()=>Ro.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new j(`Unknown Optimizer ${e}`)}const Lv=1*1024*1024;function Sv(e,t,n=!1){if(e==null||typeof e!="object"||Object.getPrototypeOf(e)!==Object.prototype||!yw(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const s=JSON.stringify(e);s.length>Lv&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${Lv}.`)}}function yw(e){if(e===null)return!0;if(typeof e=="object")if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if(typeof n!="string")return!1;if(!yw(e[n]))return!1}return!0}else if(Array.isArray(e)){for(const t of e)if(!yw(t))return!1;return!0}else return!1;else{const t=typeof e;return t==="string"||t==="number"||t==="boolean"}}function Hz(e,t,n,s=console.log){const i=jz(e),o=["Layer (type)","Output shape","Param #"];i?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map(m=>Math.floor(t*m)));let a;if(!i){o.push("Receives inputs"),a=[];for(const m in e.nodesByDepth)a.push(...e.nodesByDepth[m])}s("_".repeat(t)),Ep(o,n,s),s("=".repeat(t));const c=e.layers;for(let m=0;m1||i.length===1&&i[0].inboundLayers.length>1){t=!1;break}s.push(...i)}if(t)for(const i of e.layers){let o=!1;for(const a of i.inboundNodes)if(s.indexOf(a)!==-1)if(o){t=!1;break}else o=!0;if(!t)break}return t}function Ep(e,t,n=console.log){let s="";for(let i=0;i0&&(s=s.slice(0,s.length-1)+" "),s+=e[i],s=s.slice(0,t[i]),s+=" ".repeat(t[i]-s.length);n(s)}function Kz(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(c){s="multiple"}const i=e.name,o=e.getClassName(),a=[`${i} (${o})`,s,e.countParams().toString()];Ep(a,t,n)}function Xz(e,t,n,s){let i;try{i=JSON.stringify(e.outputShape)}catch(m){i="multiple"}const o=[];for(const m of e.inboundNodes){if(n!=null&&n.length>0&&n.indexOf(m)===-1)continue;for(let y=0;yL.name),h=[],d=t.names();for(const L of c)d.indexOf(L)!==-1?h.push(t.getValue(L)):h.push(null);s!=null&&(s.maxNumTensors=-Infinity,s.minNumTensors=Infinity);const m=c.join(",")+"|"+t.names().join(",");let y,b;if(ww[m]==null){const L=Zz(a,t);y=L.sorted,b=L.recipientCounts,ww[m]=y,xv[m]=b}y=ww[m],b={},i||Object.assign(b,xv[m]);const w=new Fo(t);for(let L=0;Ls.maxNumTensors&&(s.maxNumTensors=q),q0,()=>"Expected at least one fetch, got none");let n=[],s={};if(e.length===1){const i=Tv(e[0],t);n=i.sorted,s=i.recipientMap}else{const i=new Set;for(const o of e){const{sorted:a,recipientMap:c}=Tv(o,t);for(const h of a)i.has(h.name)||(n.push(h),i.add(h.name));for(const h in c)s[h]==null&&(s[h]=new Set),c[h].forEach(d=>s[h].add(d))}}return{sorted:n,recipientCounts:Qz(s)}}function Qz(e){const t={};for(const n in e)t[n]=e[n].size;return t}function Tv(e,t){const n=new Set,s=[],i={};for(const c of t.names())n.add(c);const o=[],a=[];for(o.push(e);o.length>0;){const c=o[o.length-1];if(n.has(c.name)){o.pop();continue}const h=a[a.length-1]===o.length-1;if(c.inputs.length===0||h)o.pop(),s.push(c),n.add(c.name),h&&a.pop();else{a.push(o.length-1);for(const d of c.inputs){if(i[d.name]==null&&(i[d.name]=new Set),i[d.name].add(c.name),n.has(d.name))continue;o.push(d)}}}return{sorted:s,recipientMap:i}}function e3(e){let t;if(e.sourceLayer.inboundNodes.length===1)t=e.sourceLayer.output;else{let n=null;for(let s=0;sN.name)}`);$r(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(N=>N.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const N of this.outputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;this.outputLayers.push(E),this.outputLayersNodeIndices.push(D),this.outputLayersTensorIndices.push(F)}for(const N of this.inputs){const E=N.sourceLayer,D=N.nodeIndex,F=N.tensorIndex;xs(D===0,"input layer has >1 nodes"),xs(F===0,"input layer has >1 tensors"),this.inputLayers.push(E),this.inputLayersNodeIndices.push(D),this.inputLayersTensorIndices.push(F)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let N=0;NN.shape),this.internalOutputShapes=this.outputs.map(N=>N.shape);const t={},n={},s={},i={},o={},a=[],c=(N,E,D,F,_,B)=>{(F==null||_==null||B==null)&&(F=N.sourceLayer,_=N.nodeIndex,B=N.tensorIndex);const $=F.inboundNodes[_];if(D.indexOf($)!==-1)throw new ni(`The tensor ${N.name} at layer "${F.name}" is part of a cycle.`);if(E.indexOf($)!==-1)return;this.containerNodes.add(ki.nodeKey(F,_)),F.id in o||(o[F.id]=Object.keys(o).length),D.indexOf($)===-1&&D.push($);const H=$.inboundLayers.length;for(let q=0;q=0;)D.splice(D.indexOf($),1);a.push($)},h=[],d=[];for(const N of this.outputs)c(N,h,d);const m=a.slice().reverse();for(const N of m){n[N.id]=N,N.id in t||(t[N.id]=0);let E=t[N.id];const D=s[N.outboundLayer.id]==null?0:s[N.outboundLayer.id];E=Math.max(E,D),s[N.outboundLayer.id]=E,i[N.outboundLayer.id]=N.outboundLayer,t[N.id]=E;for(let F=0;FparseInt(N,10)).sort(cp);this.layers=[];for(const N of w){const E=b[N];E.sort((D,F)=>{const _=o[D.id],B=o[F.id];return _B?1:0});for(const D of E)D instanceof ki&&this.internalContainerRefs.push(D),this.layers.push(D)}this.layersByDepth=b,w=Object.keys(y).map(N=>parseInt(N,10)).sort(cp);const L=this.inputs.slice(),T=[];for(const N of w)for(const E of y[N]){const D=E.outboundLayer;if(D!=null){for(const F of E.inputTensors)if(L.indexOf(F)===-1)throw new ni(`Graph disconnected: cannot obtain value for tensor ${F} at layer "${D.name}". The following previous layers were accessed without issue: ${T}`);for(const F of E.outputTensors)L.push(F);T.push(D.name)}}this.nodesByDepth=y;const A=this.layers.map(N=>N.name);for(const N of A){const E=A.filter(D=>D===N).length;if(E!==1)throw new ni(`The name "${N}" is used ${E} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(A))}this.outboundNodes=[],this.inboundNodes=[],new xp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(N=>null),outputMasks:this.outputs.map(N=>null),inputShapes:this.inputs.map(N=>N.shape),outputShapes:this.outputs.map(N=>N.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach(t=>{t._trainableWeights.forEach(n=>n.trainable=e)}),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new j("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const n of this.layers)t.push(...n.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const o of this.layers)for(const a of o.weights){if(n[a.originalName]!=null)throw new j(`Duplicate weight name: ${a.originalName}`);n[a.originalName]=a,s++}const i=[];for(const o in e){let a=o;if(n[o]==null){const c=o.split("/"),h=c.slice(0,-2).concat([c[c.length-1]]);a=h.join("/")}if(n[a]!=null)i.push([n[a],e[o]]);else if(t)throw new j(`Provided weight data has no target variable: ${o}`);delete n[a]}if(t){const o=[];for(const a in n)o.push(a);if(o.length>0)throw new j(`${o.length} of ${s} weights are not set: ${o}`)}hw(i)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion=`tfjs-layers ${Dp}`,t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=bw(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return ee(()=>{e=Nt(e);const n=new Fo;for(let s=0;s{e=Nt(e);let n;return t==null?n=Oo(null,e.length):n=Nt(t),this.runInternalGraph(e,n)[1]})}computeOutputShape(e){const t=Sp(e);if(t.length!==this.inputLayers.length)throw new j(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let a=0;aparseInt(a,10)).sort(cp);if(s.length>1)for(const a of s){const c=this.nodesByDepth[a];for(const h of c){const d=h.outboundLayer;if(this.inputLayers.map(L=>L.id).indexOf(d.id)!==-1)continue;const m=[];for(let L=0;LparseInt(c,10)).sort(cp);for(const c of s){const h=this.nodesByDepth[c];for(const d of h){const m=d.outboundLayer,y=d.inputTensors,b=d.outputTensors,w=new Array;for(const L of y)L.id in n&&w.push(n[L.id]);if(w.length===y.length){let L={},T,A,N,E;if(d.callArgs!=null&&(L=d.callArgs),w.length===1){const[D,F]=w[0];L.mask==null&&(L.mask=F),N=Nt(m.call(D,L)),E=Nt(m.computeMask(D,F)),T=[D],A=[F]}else 
T=w.map(D=>D[0]),A=w.map(D=>D[1]),L.mask==null&&(L.mask=A),N=Nt(m.call(T,L)),E=Nt(m.computeMask(T,A));if(m.activityRegularizer)throw new ze("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let D=0;D{const e=[];for(const t of this.layers)for(let n=0;n0){const L=[];for(let T=0;T0&&T.apply(jn(N),E)}function h(T){const A=T.name,N=oi(T,t.customObjects!=null?t.customObjects:{});N.setFastWeightInitDuringBuild(s),i[A]=N;const E=T.inboundNodes;E.forEach(D=>{if(!(D instanceof Array))throw new j(`Corrupted configuration, expected array for nodeData: ${D}`);a(N,D)})}const d=t.name,m=t.layers;for(const T of m)h(T);for(;!EP(o);)for(const T of m){const A=i[T.name];if(A.name in o){const N=o[A.name];delete o[A.name];for(const E of N)c(A,E)}}const y=[],b=[],w=t.inputLayers;for(const T of w){const A=T[0],N=T[1],E=T[2];xs(A in i);const D=i[A],F=D.inboundNodes[N].outputTensors;y.push(F[E])}const L=t.outputLayers;for(const T of L){const A=T[0],N=T[1],E=T[2];xs(A in i);const D=i[A],F=D.inboundNodes[N].outputTensors;b.push(F[E])}return new e({inputs:y,outputs:b,name:d})}get stateful(){if(this._stateful)throw new j("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){ee(()=>{this.layers.forEach(e=>{e.stateful&&e.resetStates()})})}}function Av(e,t,n){const s=t.length;if(e==null||Array.isArray(e)&&e.length===0)return t.map(i=>null);if(s===1)return Array.isArray(e)&&e.length===1?e:typeof e=="object"&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}else if(typeof e=="object"&&Object.keys(e).length>0&&typeof e[Object.keys(e)[0]]=="object"){const i=[];return t.forEach(o=>{o in e?i.push(e[o]):i.push(null)}),i}else throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. Provided ${n} not understood: ${JSON.stringify(e)}`)}function vv(e,t){return Av(e,t,"classWeight")}function EQ(e,t){return Av(e,t,"sampleWeight")}async function Nv(e,t,n,s){if(t!=null||s!=null)throw new Error("Support sampleWeight is not implemented yet");if(n!=null){const i=ee(()=>{if(e.shape.length===1)return e.clone();if(e.shape.length===2)if(e.shape[1]>1){const c=1;return e.argMax(c)}else{if(e.shape[1]===1)return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),o=Array.from(await i.data());He(i);const a=[];return o.forEach(c=>{if(n[c]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${c} exists in the data but not in classWeight`);a.push(n[c])}),ns(a,"float32")}else return null}function t3(e,t){return X(e,t)}const n3=32;function Cv(e,t){let n,s;const i=t;n=i.xs,s=i.ys,k(n!=null&&s!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. 
The provided Dataset instead generates ${t}`);const o=Rv("input",e.inputNames,n),a=Rv("output",e.outputNames,s),c=o[0].shape[0];k(o.length===e.inputs.length,()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${o.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`),k(a.length===e.outputs.length,()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${a.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`);for(let h=0;h`Batch size mismatch: input ${e.inputNames[h]} has ${o[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);for(let h=0;h`Batch size mismatch: output ${e.outputNames[h]} has ${a[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);return{xs:o,ys:a}}function Rv(e,t,n){if(n instanceof Q)return[n];if(Array.isArray(n))return k(n.length===t.length,()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`),n;{const s=[];for(const i of t){if(n[i]==null)throw new j(`The feature data generated by the dataset lacks the required ${e} key '${i}'.`);s.push(n[i])}return s}}function s3(e){if(e.length===3)throw new ze("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}async function i3(e,t,n){const s=n.batchesPerEpoch!=null;if(k(e.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),k(n!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),k(n.epochs!=null&&n.epochs>0&&Number.isInteger(n.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`),k(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`),k(n.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const i=n.validationData!=null;let o,a;if(i)if(Ov(n.validationData))k(n.validationBatches==null||n.validationBatches>0&&Number.isInteger(n.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`);else{const A=s3(n.validationData);o=A.xs,a=A.ys}const c=e.makeTrainFunction(),h=e.getDedupedMetricsNames();let d;i?d=h.slice().concat(h.map(A=>"val_"+A)):d=h.slice();const m=pv(n.callbacks,n.yieldEvery),y=n.verbose==null?1:n.verbose,{callbackList:b,history:w}=mv(m,y,n.epochs,null,null,r3(t,n),null,i,d);b.setModel(e),e.history=w,await b.onTrainBegin(),e.stopTraining_=!1;let L=n.initialEpoch==null?0:n.initialEpoch,T=await t.iterator();for(;L=n.batchesPerEpoch:D.done){if(i){let F;Ov(n.validationData)?F=Nt(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):F=Nt(e.evaluate(o,a,{batchSize:n.validationBatchSize==null?n3:n.validationBatchSize,verbose:0}));for(let _=0;_0)throw new ze("Verbose mode is not implemented yet.");k(!s||n.batches>0&&Number.isInteger(n.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`);const a=o3(t)?t:await t.iterator();let c=0,h=0;for(;s?h{if(d.value){const{xs:m,ys:y}=Cv(e,d.value),b=m.concat(y),w=ee(()=>i(b));if(He(b),h===0)for(let T=0;Tbe(o[T],X(L,A))),h>0&&He(N)}He(w),c+=L,++h}return o}),d.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let d=0;d0&&Number.isInteger(e),()=>`batchSize is required to be a positive integer, but got ${e}`)}function Ch(e,t,n){return e==null?[null]:Array.isArray(e)?e.map(s=>ko(s,t,n-t)):ko(e,t,n-t)}function Sw(e,t){return ee(()=>e==null?null:Array.isArray(e)?e.map(n=>Sw(n,t)):ev(e,t.dtype==="int32"?t:t.toInt()))}function Iw(e,t){const n=[];let s=0,i=null;for(;s=e&&(i=e),n.push([s,i]),s=i;return n}async function c3(e,t,n,s,i,o,a,c,h,d,m,y,b,w,L){i==null&&(i=32),o==null&&(o=1),m==null&&(m=!0),b==null&&(b=0);let T=!1;if(h!=null&&d!=null&&(T=!0),L!=null&&(T=!0,w==null))throw new j("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const A=e.checkNumSamples(n,i,w,"steps_per_epoch");let N;A!=null&&(N=si(0,A)),a==null&&(a=1);const{callbackList:E,history:D}=mv(c,a,o,b,A,w,i,T,y);E.setModel(e),e.history=D,await E.onTrainBegin(),e.stopTraining_=!1;for(let F=b;F{const J=$[H][0],re=$[H][1],ce=ko(B,J,re-J);q.batch=H,q.size=re-J;const ue=Sw(n,ce),he=t(ue);for(let de=0;de0){if(L=!0,s.validationData.length===2)a=s.validationData[0],c=s.validationData[1];else throw s.validationData.length===3?new ze("validationData including sample weights is not supported yet."):new j(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);const $=!0,H=await e.standardizeUserData(a,c,null,null,$,y);h=H[0],d=H[1],T=h.concat(d)}else if(s.validationSplit!=null&&s.validationSplit>0&&s.validationSplit<1){L=!0;const $=Math.floor(i[0].shape[0]*(1-s.validationSplit)),H=i[0].shape[0];h=Ch(i,$,H),i=Ch(i,0,$),d=Ch(o,$,H),o=Ch(o,0,$),T=h.concat(d)}else s.validationSteps!=null&&(L=!0);const A=i.concat(o).concat(m);e.checkTrainableWeightsConsistency();const N=e.makeTrainFunction(),E=e.getDedupedMetricsNames();let D,F;L?(e.makeTestFunction(),D=e.testFunction,F=E.slice().concat(E.map($=>"val_"+$))):(D=null,T=[],F=E.slice());const _=pv(s.callbacks,s.yieldEvery),B=await c3(e,N,A,E,y,s.epochs,s.verbose,_,D,T,s.shuffle,F,s.initialEpoch,null,null);return B}finally{e.isTraining=!1,_o(i,t),_o(o,n),_o(h,a),_o(d,c),m!=null&&He(m)}}function Ev(e){const t=[];e instanceof Q&&(e=[e]);for(let n=0;nn.push(i.id));else if(t!=null)for(const i in t){const o=t[i];n.push(o.id)}const s=[];if(e instanceof Q)n.indexOf(e.id)===-1&&s.push(e);else if(Array.isArray(e))e.forEach(i=>{n.indexOf(i.id)===-1&&s.push(i)});else if(e!=null)for(const i in e){const o=e[i];n.indexOf(o.id)===-1&&s.push(o)}s.forEach(i=>{i.isDisposed||i.dispose()})}function h3(e){return e instanceof Q}function xw(e){return Array.isArray(e)}function Dv(e){return!h3(e)&&!xw(e)}function kv(e,t,n,s=!0,i=""){if(t==null||t.length===0){if(e!=null){let a=!1;if(xw(e)&&e.length>0)a=!0;else if(Dv(e)){for(const c in e)if(e.hasOwnProperty(c)){a=!0;break}}else a=!0;if(a)throw new j(`Error when checking model ${i} expected no data, but got ${e}`)}return[]}if(e==null)return t.map(a=>null);let o;if(Dv(e)){e=e,o=[];for(const a of t){if(e[a]==null)throw new j(`No data provided for "${a}". Need data for each key in: ${t}`);o.push(e[a])}}else if(xw(e)){if(e=e,e.length!==t.length)throw new j(`Error when checking model ${i}: the Array of Tensors that you are passing to your model is not the size the model expected. 
Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);o=e}else{if(e=e,t.length>1)throw new j(`The model ${i} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);o=[e]}if(o=Ev(o),n!=null)for(let a=0;a=0&&d!==m)throw new j(`Error when checking ${i}: expected ${t[a]} to have shape [${n[a]}], but got array with shape [${c.shape}].`)}}return o}function u3(e,t,n){const s=$r(e.map(o=>o.shape[0]));s.sort();const i=$r(t.map(o=>o.shape[0]));if(i.sort(),s.length>1)throw new j(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map(o=>o.shape))}`);if(i.length>1)throw new j(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(o=>o.shape))}`);if(s.length>0&&i.length>0&&!ot(s,i))throw new j(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${i[0]} target sample(s).`)}function d3(e,t,n){const s=[tr,vp,Ah];for(let i=0;i1)throw new j(`The model expects ${t.length} ${i} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);o=[e]}if(n!=null)for(let a=0;a[]);let n;if(typeof e=="string"||typeof e=="function")n=[e];else if(Array.isArray(e)||typeof e=="object")n=e;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);if(Array.isArray(n))return t.map(s=>n);{const s=[];for(const i of t){let o=n.hasOwnProperty(i)?n[i]:[];Array.isArray(o)||(o=[o]),s.push(o)}return s}}const m3="layers-model";class nr extends ki{constructor(e){super(e);this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new j("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");Hz(this,e,t,n)}compile(e){if(e.loss==null&&(e.loss=[]),this.loss=e.loss,typeof e.optimizer=="string")this.optimizer_=Yz(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof Ji))throw new j("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(!Array.isArray(e.loss)&&typeof e.loss!="string"&&typeof e.loss!="function"){e.loss=e.loss;for(const o in e.loss)if(this.outputNames.indexOf(o)===-1)throw new j(`Unknown entry in loss dictionary: "${o}". Only expected the following keys: ${this.outputNames}`);for(const o of this.outputNames)e.loss[o]==null&&console.warn(`Output "${o}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${o} during training`),t.push(pw(e.loss[o]))}else if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new j(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const o=e.loss;t=o.map(a=>pw(a))}else{const o=pw(e.loss);this.outputs.forEach(a=>{t.push(o)})}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let o=0;o{for(let o=0;o1&&(this.metricsTensors.push([a,o]),this.metricsNames.push(this.outputNames[o]+"_loss"))}});const s=p3(e.metrics,this.outputNames),i=(o,a,c)=>{this.outputNames.length>1&&(a=this.outputNames[o]+"_"+a),this.metricsNames.push(a),this.metricsTensors.push([c,o])};Do("metric",()=>{for(let o=0;o{const d="";let m,y,b;for(const w of h){if(typeof w=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(w)!==-1){const T=this.internalOutputShapes[o];T[T.length-1]===1||this.lossFunctions[o]===vp?["accuracy","acc"].indexOf(w)!==-1?y=mw:["crossentropy","ce"].indexOf(w)!==-1&&(y=yv):this.lossFunctions[o]===Ap?["accuracy","acc"].indexOf(w)!==-1?y=bv:["crossentropy","ce"].indexOf(w)!==-1&&(y=wv):["accuracy","acc"].indexOf(w)!==-1?y=fw:["crossentropy","ce"].indexOf(w)!==-1&&(y=gw);let A;["accuracy","acc"].indexOf(w)!==-1?A="acc":["crossentropy","ce"].indexOf(w)!==-1&&(A="ce"),b=y,m=d+A}else{const T=Vz(w);b=T,m=d+Op(w)}let L;Do(m,()=>{L=b}),i(o,m,L)}};c(a)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=n.batchSize==null?32:n.batchSize;Lw(s);const i=!0,o=this.standardizeUserDataXY(e,t,i,s);try{const a=o[0].concat(o[1]);this.makeTestFunction();const c=this.testFunction,h=this.testLoop(c,a,s,n.verbose,n.steps);return jn(h)}finally{_o(o[0],e),_o(o[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),a3(this,e,t)}checkNumSamples(e,t,n,s="steps"){let i;if(n!=null){if(i=null,t!=null)throw new j(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else if(e!=null)Array.isArray(e)?i=e[0].shape[0]:i=e.shape[0];else throw new j(`Either the input data should have a defined shape, or ${s} shoud be specified.`);return i}execute(e,t){if(Array.isArray(t)&&t.length===0)throw new j("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],i=this.retrieveSymbolicTensors(s),o=new Fo;if(e instanceof Q&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new j(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let c=0;ca.name);for(let a=0;a0){const s=[];throw t.forEach((i,o)=>{i==null&&s.push(e[o])}),new j(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return t}predictLoop(e,t=32,n=!1){return ee(()=>{const s=this.checkNumSamples(e);if(n)throw new ze("Verbose predictLoop() is not implemented yet.");const i=Iw(s,t),o=this.outputs.map(a=>[]);for(let a=0;a{const h=i[a][0],d=i[a][1],m=Ch(e,h,d),y=[];if(Array.isArray(m))for(let w=0;wo[d].push(h))}return jn(o.map(a=>Mt(a,0)))})}predict(e,t={}){const n=Ev(e);Fv(n,this.inputNames,this.feedInputShapes,!1);try{const s=t.batchSize==null?32:t.batchSize;return Lw(s),this.predictLoop(n,s)}finally{_o(n,e)}}predictOnBatch(e){Fv(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return 
this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(this.optimizer_==null)throw new ni("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const i=[];for(let o=0;o0&&e[0].shape[0]%s!==0)throw new j(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,i=!0,o){const[a,c]=this.standardizeUserDataXY(e,t,i,o);if(n!=null)throw new Error("sample weight is not supported yet.");let h=null;if(s!=null){const d=vv(s,this.outputNames);h=[];for(let m=0;m{const o=this.checkNumSamples(t,n,i,"steps"),a=[];if(s>0)throw new ze("Verbose mode is not implemented yet.");if(i!=null)throw new ze("steps mode in testLoop() is not implemented yet");{const c=Iw(o,n),h=ns(si(0,o));for(let d=0;d1){const o=zA(e.slice(0,n),s);i+=`_${o}`}t.push(i)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),o=[],a=()=>{const m=[];for(let L=0;L1&&L{w=be(w,L)}),w},c=this.collectedTrainableWeights.map(m=>m.read()),h=!0,d=this.optimizer_.minimize(a,h,c);return[d].concat(o)}}makeTestFunction(){this.testFunction=e=>ee(()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),i=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),o=[];for(let h=0;her(t))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t)if(typeof n[s]=="string")e[s]=er(n[s]);else throw new Error("Serialization of non-string loss is not supported.")}return e}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[er(Op(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(e=>er(Op(e)));{const e={};for(const t in this.metrics)e[t]=er(Op(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(e.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(e.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(e.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");const t=vh(e.optimizer_config),n=oi(t);let s;if(typeof e.loss=="string")s=Eo(e.loss);else if(Array.isArray(e.loss))s=e.loss.map(o=>Eo(o));else if(e.loss!=null){s={};for(const o in e.loss)s[o]=Eo(e.loss[o])}let i;if(Array.isArray(e.metrics))i=e.metrics.map(o=>Eo(o));else if(e.metrics!=null){i={};for(const o in e.metrics)i[o]=Eo(e.metrics[o])}this.compile({loss:s,metrics:i,optimizer:n})}async save(e,t){if(typeof e=="string"){const h=wy(e);if(h.length===0)throw new j(`Cannot find any save handlers for URL '${e}'`);if(h.length>1)throw new j(`Found more than one (${h.length}) save handlers for URL '${e}'`);e=h[0]}if(e.save==null)throw new j("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await yy(this.getNamedWeights(t)),s=!1,i=null,o=this.toJSON(i,s),a={modelTopology:o,format:m3,generatedBy:`TensorFlow.js tfjs-layers v${Dp}`,convertedBy:null},c=t==null?!1:t.includeOptimizer;if(c&&this.optimizer!=null){a.trainingConfig=this.getTrainingConfig();const h="optimizer",{data:d,specs:m}=await 
yy(await this.optimizer.getWeights(),h);n.specs.push(...m),n.data=dd([n.data,d])}if(this.userDefinedMetadata!=null){const h=!0;Sv(this.userDefinedMetadata,this.name,h),a.userDefinedMetadata=this.userDefinedMetadata}return a.weightData=n.data,a.weightSpecs=n.specs,e.save(a)}setUserDefinedMetadata(e){Sv(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}nr.className="Model",me(nr);class _v extends nr{}_v.className="Functional",me(_v);async function f3(e,t){"modelTopology"in e||(e={modelTopology:e}),e=e;let n=e.modelTopology;n.model_config!=null&&(n=n.model_config);const s=vh(n),i=oi(s,t);if(e.weightsManifest!=null){const o=await aT(e.weightsManifest,e.pathPrefix,i.weights.map(c=>c.originalName)),a={};for(const c of i.weights)a[c.originalName]=o[c.originalName];i.loadWeights(a),He(o)}return i}async function g3(e,t){if(t==null&&(t={}),typeof e=="string"){const n=Ly(e,t);if(n.length===0)n.push(fd(e,t));else if(n.length>1)throw new j(`Found more than one (${n.length}) load handlers for URL '${e}'`);e=n[0]}return y3(e,void 0,t)}async function y3(e,t,n){if(n==null&&(n={}),e.load==null)throw new j("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const s=await e.load();let i=s.modelTopology;i.model_config!=null&&(i=i.model_config);const o=n.strict==null?!0:n.strict,a=s.weightData!=null&&s.weightSpecs!=null&&o,c=oi(vh(i),t,a),h=s.trainingConfig;if(h!=null&&c.loadTrainingConfig(h),s.userDefinedMetadata!=null&&c.setUserDefinedMetadata(s.userDefinedMetadata),s.weightData!=null){if(s.weightSpecs==null)throw new j("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:d,optimizerWeights:m}=b3(s.weightData,s.weightSpecs);c.loadWeights(d,o),c.optimizer!=null&&m.length>0&&await c.optimizer.setWeights(m),He(d),He(m.map(y=>y.tensor))}return c}function b3(e,t){const n=ud(e,t),s={},i=[];return t.forEach(o=>{o.group==="optimizer"?i.push({name:o.name,tensor:n[o.name]}):s[o.name]=n[o.name]}),{modelWeights:s,optimizerWeights:i}}class ja extends nr{constructor(e){super({inputs:[],outputs:[]});if(e=e||{},this.trainable=!0,this.built=!1,this.name=e.name!=null?e.name:Lp("sequential_"),e.layers!=null)for(const t of e.layers)this.add(t)}checkShape(e){const t=e.inboundNodes[0].outputTensors[0].shape;if(t.some(n=>n<0))throw new j(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof ja||e instanceof nr;let n;if(t){if(n=e,n.outputs.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(n.inputs.length!==1)throw new j("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(e.inboundNodes.length===0){if(e.batchInputShape==null)throw new j("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const s=av({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(s)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(e.inboundNodes.length!==1)throw new j(`A layer added to a Sequential model must not already be connected somewhere else. 
LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(e.inboundNodes[0].outputTensors.length!==1)throw new j("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=ov(this.outputs[0])}this.inboundNodes=[],new xp({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:Oo(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(s=>s.shape),outputShapes:this.outputs[0].shape})}else{const s=e.apply(this.outputs[0]);if(Array.isArray(s))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[s],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return this.model==null&&this.build(),this.model.call(e,t)}build(e){if(St(e),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new nr({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){this.model==null&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new ni("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new ni("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return this.model==null&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return this.model==null&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new ni("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async 
fitDataset(e,t){if(!this.built)throw new ni("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let i,o={};if(t instanceof Array){if(!(t[0].className!=null)||t[0].className==="Merge")throw new j("Legacy serialization format not supported yet.");i=t}else k(t.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),i=t.layers,delete t.layers,o=t;const a=new e(o);if(!(a instanceof ja))throw new ze(`Sequential.fromConfig called on non-Sequential input: ${a}`);for(const c of i){const h=void 0,d=oi(c,h,s);s&&d.setFastWeightInitDuringBuild(!0),a.add(d)}return a}set stopTraining(e){if(this.model==null)throw new j("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(this.model==null)throw new j("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}ja.className="Sequential",me(ja);function w3(e){return new nr(e)}function L3(e){return new ja(e)}function S3(e,t){return t==null&&(t={}),g3(e,t)}function Wv(e){return av(e)}function I3(e,t){Bs.registerCallbackConstructor(e,t)}class rs extends go{getConfig(){return{}}}class $v extends rs{apply(e,t=1){return XP(e,t)}}$v.className="elu",me($v);class Uv extends rs{apply(e){return Pd(e)}}Uv.className="selu",me(Uv);class Bv extends rs{apply(e){return Ri(e)}}Bv.className="relu",me(Bv);class Mv extends rs{apply(e){return ee(()=>Io(6,Ri(e)))}}Mv.className="relu6",me(Mv);class Pv extends rs{apply(e){return e}}Pv.className="linear",me(Pv);class zv extends rs{apply(e){return vi(e)}}zv.className="sigmoid",me(zv);class Gv extends rs{apply(e){return ZP(e)}}Gv.className="hardSigmoid",me(Gv);class Vv extends rs{apply(e){return Da(e)}}Vv.className="softplus",me(Vv);class Yv extends rs{apply(e){return JP(e)}}Yv.className="softsign",me(Yv);class Hv extends rs{apply(e){return Ca(e)}}Hv.className="tanh",me(Hv);class Tw extends rs{apply(e,t=-1){return No(e,t)}}Tw.className="softmax",me(Tw);class qv extends rs{apply(e,t=-1){return _d(e,t)}}qv.className="logSoftmax",me(qv);class jv extends rs{apply(e,t=1){return ee(()=>vi(e.mul(t)).mul(e))}}jv.className="swish",me(jv);function zr(e){return e.getClassName()}function Aw(e,t={}){return bh(e,ks.getMap().classNameMap,t,"activation")}function Gr(e){if(e==null){const t={};return t.className="linear",t.config={},Aw(t)}if(typeof e=="string"){const t={};return t.className=e,t.config={},Aw(t)}else return e instanceof rs?e:Aw(e)}function vw(e){if(e!=null&&typeof e!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}class Kv extends go{}class Rh extends Kv{constructor(e){super();vw(e),this.l1=e==null||e.l1==null?.01:e.l1,this.l2=e==null||e.l2==null?.01:e.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(e){return ee(()=>{let t=ct([1]);return this.hasL1&&(t=be(t,Ue(X(this.l1,sn(e))))),this.hasL2&&(t=be(t,Ue(X(this.l2,xh(e))))),t.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}Rh.className="L1L2",me(Rh);function x3(e){return vw(e),new Rh({l1:e!=null?e.l1:null,l2:0})}function T3(e){return vw(e),new 
Rh({l2:e!=null?e.l2:null,l1:0})}const Xv={l1l2:"L1L2"};function It(e){return Gb(e)}function Jv(e,t={}){return bh(e,ks.getMap().classNameMap,t,"regularizer")}function _t(e){if(e==null)return null;if(typeof e=="string"){const t=e in Xv?Xv[e]:e,n={className:t,config:{}};return Jv(n)}else return e instanceof Kv?e:Jv(e)}class Nw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null&&(this.maxValue=e.maxValue)}call(e,t){e=je(e);let n=Ri(e);return this.maxValue!=null&&(n=Yn(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}Nw.className="ReLU",me(Nw);class Cw extends lt{constructor(e){super(e==null?{}:e);this.DEFAULT_ALPHA=.3,e==null&&(e={}),this.alpha=e.alpha==null?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=je(e);return Dd(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}Cw.className="LeakyReLU",me(Cw);class Rw extends lt{constructor(e){super(e==null?{}:e);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",e==null&&(e={}),this.supportsMasking=!0,this.alphaInitializer=Ft(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=_t(e.alphaRegularizer),this.alphaConstraint=ln(e.alphaConstraint),e.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else if(typeof e.sharedAxes=="number")this.sharedAxes=[e.sharedAxes];else throw new j(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`)}build(e){e=St(e);const t=e.slice(1);if(this.sharedAxes!=null)for(const s of this.sharedAxes)t[s-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(this.sharedAxes!=null)for(let s=1;s(Gt(t),t==="channelsFirst"?Me(e,[0,2,3,1]):e))}function Zv(e,t){return ee(()=>(Gt(t),t==="channelsFirst"?Me(e,[0,2,3,4,1]):e))}function Qv(e,t,n,s=1,i="valid",o,a=1){return ee(()=>{if(o==null&&(o=ti()),Gt(o),e.shape.length!==3)throw new j(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(t.shape.length!==3)throw new j(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(n!=null&&n.shape.length!==1)throw new j(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(o==="channelsFirst"&&(e=Me(e,[0,2,1])),i==="causal")throw new ze("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let c=Nd(e,t,s,i==="same"?"same":"valid","NWC",a);return n!=null&&(c=Di(c,n)),c})}function DQ(e,t,n=1,s="valid",i,o=1){return ee(()=>(Gt(i),Qv(e,t,null,n,s,i,o)))}function kQ(e,t,n=[1,1],s="valid",i,o){return ee(()=>(Gt(i),Fw(e,t,null,n,s,i,o)))}function Fw(e,t,n,s=[1,1],i="valid",o,a,c=null){return ee(()=>{if(o==null&&(o=ti()),Gt(o),e.rank!==3&&e.rank!==4)throw new j(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(t.rank!==3&&t.rank!==4)throw new j(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let h=kw(e,o);if(i==="causal")throw new ze("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return h=vb({x:h,filter:t,strides:s,pad:i==="same"?"same":"valid",dilations:a,dataFormat:"NHWC",bias:n,activation:c}),o==="channelsFirst"&&(h=Me(h,[0,3,1,2])),h})}function FQ(e,t,n=[1,1,1],s="valid",i,o){return 
ee(()=>(Gt(i),eN(e,t,null,n,s,i,o)))}function eN(e,t,n,s=[1,1,1],i="valid",o,a){return ee(()=>{if(o==null&&(o=ti()),Gt(o),e.rank!==4&&e.rank!==5)throw new j(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(t.rank!==4&&t.rank!==5)throw new j(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let c=Zv(e,o);if(i==="causal")throw new ze("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return c=Zy(c,t,s,i==="same"?"same":"valid","NDHWC",a),n!=null&&(c=Di(c,n)),o==="channelsFirst"&&(c=Me(c,[0,4,1,2,3])),c})}class _w extends lt{constructor(e,t){super(t);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",_w.verifyArgs(t),this.rank=e,pn(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new ze(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=Ka(t.kernelSize,e,"kernelSize"),this.strides=Ka(t.strides==null?1:t.strides,e,"strides"),this.padding=t.padding==null?"valid":t.padding,Ts(this.padding),this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,Gt(this.dataFormat),this.activation=Gr(t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.biasInitializer=Ft(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=ln(t.biasConstraint),this.biasRegularizer=_t(t.biasRegularizer),this.activityRegularizer=_t(t.activityRegularizer),this.dilationRate=Ka(t.dilationRate==null?1:t.dilationRate,e,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new j(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new j(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new j(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(e){if(xs("kernelSize"in e,"required key 'kernelSize' not in config"),typeof e.kernelSize!="number"&&!Yb(e.kernelSize,"number",1,3))throw new j(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:zr(this.activation),useBias:this.useBias,biasInitializer:Vt(this.biasInitializer),biasRegularizer:It(this.biasRegularizer),activityRegularizer:It(this.activityRegularizer),biasConstraint:cn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Oh extends _w{constructor(e,t){super(e,t);this.kernel=null,Oh.verifyArgs(t),this.filters=t.filters,pn(this.filters,"filters"),this.kernelInitializer=Ft(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=ln(t.kernelConstraint),this.kernelRegularizer=_t(t.kernelRegularizer)}build(e){e=St(e);const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j(`The channel dimension of the input should be 
defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return ee(()=>{e=je(e);let n;const s=this.bias==null?null:this.bias.read(),i=VA(this.activation.getClassName());if(i!=null&&this.rank===2)n=Fw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate,i);else{if(this.rank===1)n=Qv(e,this.kernel.read(),s,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)n=Fw(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)n=eN(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new ze("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(n=this.activation.apply(n))}return n})}computeOutputShape(e){e=St(e);const t=[],n=this.dataFormat==="channelsLast"?e.slice(1,e.length-1):e.slice(2);for(let i=0;i 0 but got ${JSON.stringify(e.filters)}`)}}class Eh extends Oh{constructor(e){super(2,e);Eh.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!Yb(e.kernelSize,"number",1,2))throw new j(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Eh.className="Conv2D",me(Eh);class Fp extends Oh{constructor(e){super(3,e);Fp.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!(Array.isArray(e.kernelSize)&&(e.kernelSize.length===1||e.kernelSize.length===3)))throw new j(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}Fp.className="Conv3D",me(Fp);class Ww extends Eh{constructor(e){super(e);if(this.inputSpec=[new mn({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new j(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(e=St(e),e.length!==4)throw new j("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new j("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new mn({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return ee(()=>{let n=je(e);if(n.shape.length!==4)throw new j(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);const s=n.shape,i=s[0];let o,a;this.dataFormat==="channelsFirst"?(o=2,a=3):(o=1,a=2);const c=s[o],h=s[a],d=this.kernelSize[0],m=this.kernelSize[1],y=this.strides[0],b=this.strides[1],w=kp(c,y,d,this.padding),L=kp(h,b,m,this.padding),T=[i,w,L,this.filters];this.dataFormat!=="channelsLast"&&(n=Me(n,[0,2,3,1]));let A=Cd(n,this.kernel.read(),T,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(A=Me(A,[0,3,1,2])),this.bias!=null&&(A=Di(A,this.bias.read(),this.dataFormat)),this.activation!=null&&(A=this.activation.apply(A)),A})}computeOutputShape(e){e=St(e);const t=e.slice();let n,s,i;this.dataFormat==="channelsFirst"?(n=1,s=2,i=3):(n=3,s=1,i=2);const o=this.kernelSize[0],a=this.kernelSize[1],c=this.strides[0],h=this.strides[1];return t[n]=this.filters,t[s]=kp(t[s],c,o,this.padding),t[i]=kp(t[i],h,a,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}Ww.className="Conv2DTranspose",me(Ww);class tN extends Oh{constructor(e,t){super(e,t);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,t.filters==null)throw new j("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(t.kernelInitializer!=null||t.kernelRegularizer!=null||t.kernelConstraint!=null)throw new j("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(t.padding!=null&&t.padding!=="same"&&t.padding!=="valid")throw new j(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=_t(t.depthwiseRegularizer),this.depthwiseConstraint=ln(t.depthwiseConstraint),this.pointwiseInitializer=Ft(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=_t(t.pointwiseRegularizer),this.pointwiseConstraint=ln(t.pointwiseConstraint)}build(e){if(e=St(e),e.length{e=je(e);let n;if(this.rank===1)throw new ze("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(e=Me(e,[0,2,3,1])),n=fb(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(n=Di(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),this.dataFormat==="channelsFirst"&&(n=Me(n,[0,3,1,2])),n})}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.pointwiseInitializer=Vt(this.pointwiseInitializer),e.depthwiseRegularizer=It(this.depthwiseRegularizer),e.pointwiseRegularizer=It(this.pointwiseRegularizer),e.depthwiseConstraint=cn(this.depthwiseConstraint),e.pointwiseConstraint=cn(this.pointwiseConstraint),e}}tN.className="SeparableConv";class $w extends tN{constructor(e){super(2,e)}}$w.className="SeparableConv2D",me($w);class _p extends Oh{constructor(e){super(1,e);_p.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!Yb(e.kernelSize,"number",1,1))throw new j(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}_p.className="Conv1D",me(_p);class Uw extends lt{constructor(e){super(e);typeof e.cropping=="number"?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:typeof e.cropping[0]=="number"?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=e.dataFormat===void 0?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return this.dataFormat==="channelsFirst"?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return ee(()=>{if(e=je(e),this.dataFormat==="channelsLast"){const n=hp(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return hp(n,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{const n=hp(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return hp(n,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}Uw.className="Cropping2D",me(Uw);class Bw extends 
lt{constructor(e){super(e);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=e.size==null?this.DEFAULT_SIZE:e.size,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat}computeOutputShape(e){if(this.dataFormat==="channelsFirst"){const t=e[2]==null?null:this.size[0]*e[2],n=e[3]==null?null:this.size[1]*e[3];return[e[0],e[1],t,n]}else{const t=e[1]==null?null:this.size[0]*e[1],n=e[2]==null?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return ee(()=>{let n=je(e);const s=n.shape;if(this.dataFormat==="channelsFirst"){n=Me(n,[0,2,3,1]);const i=this.size[0]*s[2],o=this.size[1]*s[3],a=n.resizeNearestNeighbor([i,o]);return Me(a,[0,3,1,2])}else{const i=this.size[0]*s[1],o=this.size[1]*s[2];return n.resizeNearestNeighbor([i,o])}})}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}Bw.className="UpSampling2D",me(Bw);function A3(e,t,n=[1,1],s="valid",i,o){return ee(()=>{i==null&&(i=ti()),Gt(i);let a=kw(e,i);if(e.rank!==4)throw new j(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(t.rank!==4)throw new j(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return a=wo(a,t,n,s==="same"?"same":"valid","NHWC",o),i==="channelsFirst"&&(a=Me(a,[0,3,1,2])),a})}class Mw extends _w{constructor(e){super(2,e);this.depthwiseKernel=null,this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=Ft(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=ln(e.depthwiseConstraint),this.depthwiseRegularizer=_t(e.depthwiseRegularizer)}build(e){if(e=St(e),e.length<4)throw new j(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(e)}.`);const t=this.dataFormat==="channelsFirst"?1:3;if(e[t]==null||e[t]<0)throw new j(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{e=je(e);let n=A3(e,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(n=Di(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),n})}computeOutputShape(e){e=St(e);const t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,i=ai(t,this.kernelSize[0],this.padding,this.strides[0]),o=ai(n,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[e[0],s,i,o]:[e[0],i,o,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=Vt(this.depthwiseInitializer),e.depthwiseRegularizer=It(this.depthwiseRegularizer),e.depthwiseConstraint=cn(this.depthwiseRegularizer),e}}Mw.className="DepthwiseConv2D",me(Mw);function nN(e,t,n,s){if(Array.isArray(e)){if(t!=null||n!=null)throw new j("When inputs is an array, neither initialState or constants should be provided");s!=null&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function i(o){return 
o==null||Array.isArray(o)?o:[o]}return t=i(t),n=i(n),{inputs:e,initialState:t,constants:n}}function sN(e,t,n,s=!1,i,o,a=!1,c=!1){return ee(()=>{const h=t.shape.length;if(h<3)throw new j(`Input should be at least 3D, but is ${h}D.`);const d=[1,0].concat(si(2,h));if(t=Me(t,d),o!=null)throw new ze("The rnn() functoin of the deeplearn.js backend does not support constants yet.");a&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),i!=null&&(i=i.asType("bool").asType("float32"),i.rank===h-1&&(i=Hn(i,-1)),i=Me(i,d)),s&&(t=Is(t,0),i!=null&&(i=Is(i,0)));const m=[];let y,b=n;const w=t.shape[0],L=Oi(t);let T;i!=null&&(T=Oi(i));for(let N=0;Ne(E,b));if(i==null)y=D[0],b=D[1];else{const F=ee(()=>{const _=T[N],B=On(_).sub(_),$=D[0].mul(_).add(b[0].mul(B)),H=b.map((q,J)=>D[1][J].mul(_).add(q.mul(B)));return{output:$,newStates:H}});y=F.output,b=F.newStates}c&&m.push(y)}let A;if(c){const N=1;A=is(m,N)}return[y,A,b]})}class Fi extends lt{constructor(e){super(e);let t;if(e.cell==null)throw new j("cell property is missing for the constructor of RNN.");if(Array.isArray(e.cell)?t=new Up({cells:e.cell}):t=e.cell,t.stateSize==null)throw new j("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=t,this.returnSequences=e.returnSequences==null?!1:e.returnSequences,this.returnState=e.returnState==null?!1:e.returnState,this.goBackwards=e.goBackwards==null?!1:e.goBackwards,this._stateful=e.stateful==null?!1:e.stateful,this.unroll=e.unroll==null?!1:e.unroll,this.supportsMasking=!0,this.inputSpec=[new mn({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return si(0,e).map(t=>null)}else return this.states_}setStates(e){this.states_=e}computeOutputShape(e){cw(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(this.returnSequences?s=[e[0],e[1],n]:s=[e[0],n],this.returnState){const i=[];for(const o of t)i.push([e[0],o]);return[s].concat(i)}else return s}computeMask(e,t){return ee(()=>{Array.isArray(t)&&(t=t[0]);const n=this.returnSequences?t:null;if(this.returnState){const s=this.states.map(i=>null);return[n].concat(s)}else return n})}get states(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;na.shape[a.shape.length-1]),o))throw new j(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=o.map(a=>new mn({shape:[null,a]}));this.stateful&&this.resetStates()}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new Qi("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(n==null)throw new j("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_=[ct([n,this.cell.stateSize])];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>ct([n,s])):this.states_[0]=ct([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t===!0?this.keptStates.push(this.states_.slice()):He(this.states_);for(let s=0;sNn(s.clone()))})}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=nN(e,n,s,this.numConstants);e=i.inputs,n=i.initialState,s=i.constants;let o=[],a=[];if(n!=null){t.initialState=n,o=o.concat(n),this.stateSpec=[];for(const h of n)this.stateSpec.push(new mn({shape:h.shape}));a=a.concat(this.stateSpec)}s!=null&&(t.constants=s,o=o.concat(s),this.numConstants=s.length);const c=o[0]instanceof ri;if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const y=super.apply(h,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t==null?null:t.mask,s=t==null?null:t.training;let i=t==null?null:t.initialState;e=je(e),i==null&&(this.stateful?i=this.states_:i=this.getInitialState(e));const o=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(i.length!==o)throw new j(`RNN Layer has ${o} state(s) but was passed ${i.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const a={training:s},c=(w,L)=>{const T=this.cell.call([w].concat(L),a);return[T[0],T.slice(1)]},h=sN(c,e,i,this.goBackwards,n,null,this.unroll,this.returnSequences),d=h[0],m=h[1],y=h[2];this.stateful&&this.resetStates(y,s);const b=this.returnSequences?m:d;return this.returnState?[b].concat(y):b})}getInitialState(e){return ee(()=>{let t=ct(e.shape);return t=Ue(t,[1,2]),t=Ih(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(n=>n>1?Qb(t,[1,n]):t):this.cell.stateSize>1?[Qb(t,[1,this.cell.stateSize])]:[t]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Fi.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=t.cell,i=oi(s,n);return new e(Object.assign(t,{cell:i}))}}Fi.className="RNN",me(Fi);class Xa extends lt{}class Wp extends 
Xa{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,pn(this.units,"units"),this.activation=Gr(e.activation==null?this.DEFAULT_ACTIVATION:e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=Va([1,Br([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=Va([1,Br([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=St(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=t.training==null?!1:t.training;0On(e),rate:this.dropout,training:s})),0On(n),rate:this.recurrentDropout,training:s}));let i;const o=this.dropoutMask,a=this.recurrentDropoutMask;o!=null?i=Ei(X(e,o),this.kernel.read()):i=Ei(e,this.kernel.read()),this.bias!=null&&(i=Di(i,this.bias.read())),a!=null&&(n=X(n,a));let c=be(i,Ei(n,this.recurrentKernel.read()));return this.activation!=null&&(c=this.activation.apply(c)),[c,c]})}getConfig(){const e=super.getConfig(),t={units:this.units,activation:zr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),recurrentInitializer:Vt(this.recurrentInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:It(this.kernelRegularizer),recurrentRegularizer:It(this.recurrentRegularizer),biasRegularizer:It(this.biasRegularizer),activityRegularizer:It(this.activityRegularizer),kernelConstraint:cn(this.kernelConstraint),recurrentConstraint:cn(this.recurrentConstraint),biasConstraint:cn(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}Wp.className="SimpleRNNCell",me(Wp);class Pw extends Fi{constructor(e){e.cell=new Wp(e),super(e)}call(e,t){return ee(()=>{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return new e(t)}}Pw.className="SimpleRNN",me(Pw);class $p extends 
Xa{constructor(e){super(e);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new j("GRUCell does not support reset_after parameter set to true.");this.units=e.units,pn(this.units,"units"),this.activation=Gr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Gr(e.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=Va([1,Br([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=Va([1,Br([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=St(e);const t=e[e.length-1];this.kernel=this.addWeight("kernel",[t,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return ee(()=>{if(e=e,e.length!==2)throw new j(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training==null?!1:t.training;let s=e[1];e=e[0],0On(e),rate:this.dropout,training:n,count:3})),0On(s),rate:this.recurrentDropout,training:n,count:3}));const i=this.dropoutMask,o=this.recurrentDropoutMask;let a,c,h;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}zw.className="GRU",me(zw);class Dh extends Xa{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,pn(this.units,"units"),this.activation=Gr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Gr(e.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Ft(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=_t(e.kernelRegularizer),this.recurrentRegularizer=_t(e.recurrentRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.kernelConstraint=ln(e.kernelConstraint),this.recurrentConstraint=ln(e.recurrentConstraint),this.biasConstraint=ln(e.biasConstraint),this.dropout=Va([1,Br([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=Va([1,Br([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;e=St(e);const n=e[e.length-1];this.kernel=this.addWeight("kernel",[n,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let s;if(this.useBias){if(this.unitForgetBias){const i=this.biasInitializer,o=this.units;s=new(t=class extends Us{apply(c,h){const d=i.apply([o]),m=new dp().apply([o]),y=i.apply([o*2]);return QA(QA(d,m),y)}},t.className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,s,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training;if(e=e,e.length!==3)throw new j(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const i=e[2];e=e[0],0On(e),rate:this.dropout,training:n,count:4})),0On(s),rate:this.recurrentDropout,training:n,count:4}));const o=this.dropoutMask,a=this.recurrentDropoutMask;let c,h,d,m;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}Gw.className="LSTM",me(Gw);class Up extends Xa{constructor(e){super(e);this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return ee(()=>{e=e;let n=e.slice(1);const s=[];for(const a of this.cells.slice().reverse())Array.isArray(a.stateSize)?s.push(n.splice(0,a.stateSize.length)):s.push(n.splice(0,1));s.reverse();const i=[];let o;for(let a=0;a{Do(`RNNCell_${s}`,()=>{n.build(e),Array.isArray(n.stateSize)?t=n.stateSize[0]:t=n.stateSize,e=[e[0],t]})}),this.built=!0}getConfig(){const e=super.getConfig(),t=i=>({className:i.getClassName(),config:i.getConfig()}),n=this.cells.map(t),s={cells:n};return Object.assign({},e,s)}static fromConfig(e,t,n={}){const s=[];for(const i of t.cells)s.push(oi(i,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const 
t=[];for(const n of this.cells)t.push(...n.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return lw(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,i=e.splice(s);for(let o=0;otv(t(),n),a=()=>Th(o,t,s);if(!i||i<=1)return Nn(a().clone());const c=Array(i).fill(void 0).map(a);return c.map(h=>Nn(h.clone()))}var v3=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(e!=null&&typeof Object.getOwnPropertySymbols=="function")for(var i=0,s=Object.getOwnPropertySymbols(e);i{if(this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new j("ConvRNN2D cell does not support constants");const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return ee(()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=ct(i);return Array.isArray(t)?Array(t.length).fill(o):[o]})}resetStates(e,t=!1){ee(()=>{if(!this.stateful)throw new Qi("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=n[0];if(o==null)throw new j("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_=[ct(i)];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>ct(i)):this.states_[0]=ct(i);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new j(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):He(this.states_);for(let a=0;aNn(a.clone()))})}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:i,strides:o,dilationRate:a}=this.cell,c=t==="channelsFirst",h=e[c?3:2],d=e[c?4:3],m=ai(h,s[0],i,o[0],a[0]),y=ai(d,s[1],i,o[1],a[1]),b=[...e.slice(0,2),...c?[n,m,y]:[m,y,n]];return b}}iN.className="ConvRNN2D";class Bp extends Dh{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:i,dataFormat:o,dilationRate:a}=e;super(Object.assign({},e,{units:t}));this.filters=t,pn(this.filters,"filters"),this.kernelSize=Ka(n,2,"kernelSize"),this.kernelSize.forEach(c=>pn(c,"kernelSize")),this.strides=Ka(s||1,2,"strides"),this.strides.forEach(c=>pn(c,"strides")),this.padding=i||"valid",Ts(this.padding),this.dataFormat=o||"channelsLast",Gt(this.dataFormat),this.dilationRate=Ka(a||1,2,"dilationRate"),this.dilationRate.forEach(c=>pn(c,"dilationRate"))}build(e){var t;e=St(e);const n=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[n]==null)throw new j(`The channel dimension of the input should be defined. Found ${e[n]}`);const s=e[n],i=4,o=this.kernelSize.concat([s,this.filters*i]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const a=this.kernelSize.concat([this.filters,this.filters*i]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",a,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let c;if(this.unitForgetBias){const h=this.biasInitializer,d=this.filters;c=new(t=class extends Us{apply(y,b){const w=h.apply([d]),L=Qs([d]),T=h.apply([d*2]);return Zb([w,L,T])}},t.className="CustomInit",t)}else c=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*i],null,c,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return ee(()=>{if(e.length!==3)throw new j(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],i=e[1],o=e[2],a=4;0On(s),rate:this.dropout,training:n,count:a}));const c=this.dropoutMask,h=(Ie,Se,Ee)=>!Se||!Se[Ee]?Ie:X(Se[Ee],Ie);let d=h(s,c,0),m=h(s,c,1),y=h(s,c,2),b=h(s,c,3);0On(i),rate:this.recurrentDropout,training:n,count:a}));const w=this.recurrentDropoutMask;let L=h(i,w,0),T=h(i,w,1),A=h(i,w,2),N=h(i,w,3);const E=3,[D,F,_,B]=ss(this.kernel.read(),a,E),[$,H,q,J]=this.useBias?ss(this.bias.read(),a):[null,null,null,null];d=this.inputConv(d,D,$,this.padding),m=this.inputConv(m,F,H,this.padding),y=this.inputConv(y,_,q,this.padding),b=this.inputConv(b,B,J,this.padding);const[re,ce,ue,he]=ss(this.recurrentKernel.read(),a,E);L=this.recurrentConv(L,re),T=this.recurrentConv(T,ce),A=this.recurrentConv(A,ue),N=this.recurrentConv(N,he);const de=this.recurrentActivation.apply(be(d,L)),le=this.recurrentActivation.apply(be(m,T)),ye=be(X(le,o),X(de,this.activation.apply(be(y,A)))),pe=X(this.recurrentActivation.apply(be(b,N)),this.activation.apply(ye));return[pe,pe,ye]})}getConfig(){const e=super.getConfig(),{units:t}=e,n=v3(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const i=ji(e,t,this.strides,s||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return n?Di(i,n,this.dataFormat):i}recurrentConv(e,t){const n=1;return 
ji(e,t,n,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}Bp.className="ConvLSTM2DCell",me(Bp);class Vw extends iN{constructor(e){const t=new Bp(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}Vw.className="ConvLSTM2D",me(Vw);class Mp extends lt{constructor(e){super(e);this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(this.noiseShape==null)return this.noiseShape;const t=e.shape,n=[];for(let s=0;s{this.invokeCallHook(e,t);const n=je(e);if(0tv(n,this.rate,i,this.seed),()=>n,s);return o}return e})}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}Mp.className="Dropout",me(Mp);class Yw extends Mp{constructor(e){super(e);this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}Yw.className="SpatialDropout1D",me(Yw);class Hw extends lt{constructor(e){super(e);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.batchInputShape==null&&e.inputShape==null&&e.inputDim!=null){let t=null;e.batchSize!=null&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,pn(this.units,"units"),this.activation=Gr(e.activation),e.useBias!=null&&(this.useBias=e.useBias),this.kernelInitializer=Ft(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=Ft(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=ln(e.kernelConstraint),this.biasConstraint=ln(e.biasConstraint),this.kernelRegularizer=_t(e.kernelRegularizer),this.biasRegularizer=_t(e.biasRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){e=St(e);const t=e[e.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){e=St(e);const t=e.slice();return t[t.length-1]=this.units,t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=je(e),s=VA(this.activation.getClassName());let i;return s!=null?i=Ei(n,this.kernel.read(),s,this.bias?this.bias.read():null):(i=Ei(n,this.kernel.read()),this.bias!=null&&(i=Di(i,this.bias.read())),this.activation!=null&&(i=this.activation.apply(i))),i})}getConfig(){const e={units:this.units,activation:zr(this.activation),useBias:this.useBias,kernelInitializer:Vt(this.kernelInitializer),biasInitializer:Vt(this.biasInitializer),kernelRegularizer:It(this.kernelRegularizer),biasRegularizer:It(this.biasRegularizer),activityRegularizer:It(this.activityRegularizer),kernelConstraint:cn(this.kernelConstraint),biasConstraint:cn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}Hw.className="Dense",me(Hw);class qw extends lt{constructor(e){e=e||{},super(e),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=St(e);for(const t of e.slice(1))if(t==null)throw new j(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],Ur(e,1)]}call(e,t){return ee(()=>{this.invokeCallHook(e,t);let n=je(e);if(this.dataFormat==="channelsFirst"&&n.rank>1){const s=[0];for(let i=2;i{this.invokeCallHook(e,t);const n=je(e);return this.activation.apply(n)})}getConfig(){const e={activation:zr(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}jw.className="Activation",me(jw);class Kw extends lt{constructor(e){super(e);this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return ee(()=>(e=je(e),qP(e,this.n)))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}Kw.className="RepeatVector",me(Kw);class Xw extends lt{constructor(e){super(e);this.targetShape=e.targetShape;for(let t=0;t{this.invokeCallHook(e,t);const n=je(e),s=n.shape,i=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(i)})}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}Xw.className="Reshape",me(Xw);class Jw extends lt{constructor(e){super(e);if(e.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=si(1,e.dims.length+1);if(!ot(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new mn({ndim:this.dims.length+1})]}computeOutputShape(e){e=St(e);const t=e.slice();return this.dims.forEach((n,s)=>{t[s+1]=e[n]}),t}call(e,t){return Me(je(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}Jw.className="Permute",me(Jw);class Zw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null?this.maskValue=e.maskValue==null?0:e.maskValue:this.maskValue=0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=je(e),s=-1;return Pl(kr(n,this.maskValue),s)}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=je(e),s=-1,i=!0,o=Pl(kr(n,this.maskValue),s,i),a=n.mul(o.asType(n.dtype));return a})}}Zw.className="Masking",me(Zw);class Qw extends lt{constructor(e){super(e);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",e.batchInputShape==null&&e.inputShape==null){let t=null;e.batchSize!=null&&(t=e.batchSize),e.inputLength==null?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Nt(e.inputLength))}this.inputDim=e.inputDim,pn(this.inputDim,"inputDim"),this.outputDim=e.outputDim,pn(this.outputDim,"outputDim"),this.embeddingsInitializer=Ft(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=_t(e.embeddingsRegularizer),this.activityRegularizer=_t(e.activityRegularizer),this.embeddingsConstraint=ln(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return 
ee(()=>this.maskZero?(e=je(e),kr(e,Qe(e))):null)}computeOutputShape(e){if(e=St(e),this.inputLength==null)return[...e,this.outputDim];const t=Nt(this.inputLength);if(t.length!==e.length-1)throw new j(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s{this.invokeCallHook(e,t);let n=je(e);n.dtype!=="int32"&&(n=Sh(n,"int32"));const s=ev(this.embeddings.read(),n.as1D());return s.reshape(St(this.computeOutputShape(n.shape)))})}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Vt(this.embeddingsInitializer),embeddingsRegularizer:It(this.embeddingsRegularizer),activityRegularizer:It(this.activityRegularizer),embeddingsConstraint:cn(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}Qw.className="Embedding",me(Qw);class Wo extends lt{constructor(e){super(e||{});this.supportsMasking=!0}mergeFunction(e){throw new ze}computeElementwiseOpOutputShape(e,t){if(e==null||t==null)return null;if(e.length1)throw new j(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(e)}.`);let n=e[0]==null?null:e[0].slice(1);for(let i=1;ii.length);e.indexOf(null)===-1&&$r(s).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return ee(()=>{if(e=e,this.reshapeRequired){const n=[],s=e.map(i=>i.rank);if(s.indexOf(null)===-1){const i=Br(s);for(let o of e){const a=o.rank;for(let c=0;c1){const d=si(1,h).concat([0]);n.push(Me(c,d)),i=!0}else n.push(c)}let o=this.mergeFunction(n);const a=o.rank;if(i){if(a==null){const c=o.shape,h=c.length,d=c[h-1],m=[d].concat(c.slice(0,c.length-1));o=Me(o.reshape([-1,d]),[1,0]).reshape(m)}else if(a>1){const c=[a-1].concat(si(0,a-1));o=Me(o,c)}}return o}}else return this.mergeFunction(e)})}computeOutputShape(e){e=e;let t;e[0]==null?t=null:t=e[0].slice(1);for(let s=1;s{if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an Array");if(!Array.isArray(e))throw new j("`inputs` should be an Array");if(t.length!==e.length)throw new j(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every(s=>s==null))return null;t=t.map(s=>s==null?s:Hn(s,0));let n=t[0];for(let s=1;s{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0];for(let n=1;n{let t=e[0];for(let n=1;n1)throw new j("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return ee(()=>Zb(e,this.axis))}computeOutputShape(e){if(!(Array.isArray(e)&&Array.isArray(e[0])))throw new j("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const i of t.slice(1)){if(n[s]==null||i[s]==null){n[s]=null;break}n[s]+=i[s]}return n}computeMask(e,t){if(t==null)return null;if(!Array.isArray(t))throw new j("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new j("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new j(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return ee(()=>{let n=!0;if(t.forEach(o=>{if(o!=null){n=!1;return}}),n)return null;const s=[];for(let o=0;o3||t.shape.length>3)throw new ze("batchDot is not implemented for tensors of 4D or higher rank yet");if(k(e.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`),k(e.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof n=="number"&&(n=[n,n]),e.dtype==="complex64"||t.dtype==="complex64")throw new ze("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,i=t.shape.length;n==null&&(n=[s-1,i-2]);const o=n;return ee(()=>{let a;if(s>i){a=s-i;const h=[];for(let d=0;ds){a=i-s;const h=[];for(let d=0;d0){let h;s>i?h=s+i-3:h=s-1;const d=[];for(let m=h;m"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new ze("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new j(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(e.length!==2)throw new j(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t=e[0],n=e[1],s;return Array.isArray(this.axes)?s=this.axes.map((i,o)=>Bh(i,e[o].shape.length)):s=[Bh(this.axes,t.shape.length),Bh(this.axes,n.shape.length)],this.normalize&&(t=Tp(t,s[0]),n=Tp(n,s[1])),N3(t,n,s)}interpretAxes(e,t){let n;return Array.isArray(this.axes)?n=this.axes:n=[Bh(this.axes,e.length),Bh(this.axes,t.length)],n}computeOutputShape(e){k(Array.isArray(e)&&e.length===2&&Array.isArray(e[0])&&Array.isArray(e[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new ze("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const i=t.concat(n);return i.length===1&&i.push(1),i}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}eL.className="Dot",me(eL);class tL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=je(e),s=()=>up(n.shape,0,this.stddev).add(n),i=Th(s,()=>n,t.training||!1);return i})}}tL.className="GaussianNoise",me(tL);class nL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{this.invokeCallHook(e,t);const n=je(e);if(this.rate>0&&this.rate<1){const 
s=()=>{const i=Math.sqrt(this.rate/(1-this.rate));return n.mul(up(n.shape,1,i))};return Th(s,()=>n,t.training||!1)}return n})}}nL.className="GaussianDropout",me(nL);class sL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||je(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return ee(()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e),s=()=>{const i=je(e),o=1.6732632423543772,a=1.0507009873554805,c=-o*a;let h=Ki(vo(n),this.rate);h=Sh(h,"float32");const d=((1-this.rate)*(1+this.rate*c**2))**-.5,m=-d*c*this.rate,y=i.mul(h).add(h.add(-1).mul(c));return y.mul(d).add(m)};return Th(s,()=>je(e),t.training||!1)}return e})}}sL.className="AlphaDropout",me(sL);function Mh(e,t,n,s,i,o=.001){let a;if(e.rank===2)a=CT(e,t,n,s,i,o);else if(e.rank===3)a=RT(e,t,n,s,i,o);else if(e.rank===4)a=OT(e,t,n,s,i,o);else throw new ze(`batchNormalization is not implemented for array of rank ${e.rank} yet`);return a}function C3(e,t,n,s,i=.001){return ee(()=>{const o=Ud(e,s),a=o.mean,c=o.variance,h=Mh(e,a,c,n,t,i);return[h,a,c]})}function R3(e,t,n,s,i=.001){return ee(()=>{const o=Ud(e,s),a=o.mean,c=o.variance,h=[];for(const L of si(0,e.rank))s.indexOf(L)!==-1?h.push(1):h.push(e.shape[L]);const d=a.reshape(h),m=c.reshape(h),y=t==null?null:t.reshape(h),b=n==null?null:n.reshape(h),w=Mh(e,d,m,b,y,i);return[w,a,c]})}function O3(e,t,n,s,i=.001){return ot(s.slice().sort(),si(0,e.rank-1))?C3(e,t,n,s,i):R3(e,t,n,s,i)}class iL extends lt{constructor(e){e==null&&(e={}),super(e),this.supportsMasking=!0,this.axis=e.axis==null?-1:e.axis,this.momentum=e.momentum==null?.99:e.momentum,this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.movingMeanInitializer=Ft(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=Ft(e.movingVarianceInitializer||"ones"),this.betaConstraint=ln(e.betaConstraint),this.gammaConstraint=ln(e.gammaConstraint),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer)}build(e){e=St(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(n==null)throw new j(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new mn({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return ee(()=>{const n=t.training==null?!1:t.training,s=je(e),i=s.shape,o=i.length,a=si(0,o),c=this.axis>=0?this.axis:this.axis+o;a.splice(c,1);const h=Oo(1,o);h[c]=i[c];const d=a.slice();d.sort();const m=!ot(d,si(0,o).slice(0,o-1)),y=()=>{if(m){const N=this.movingMean.read().reshape(h),E=this.movingVariance.read().reshape(h),D=this.center?this.beta.read().reshape(h):null,F=this.scale?this.gamma.read().reshape(h):null;return Mh(s,N,E,D,F,this.epsilon)}else return 
Mh(s,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!n)return y();const[b,w,L]=O3(s,this.gamma.read(),this.beta.read(),a,this.epsilon),T=(N,E,D)=>{ee(()=>{const F=1-D,_=N.read(),B=_.sub(E).mul(F);N.write(_.sub(B))})},A=()=>{T(this.movingMean,w,this.momentum),T(this.movingVariance,L,this.momentum)};return A(),b})}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Vt(this.betaInitializer),gammaInitializer:Vt(this.gammaInitializer),movingMeanInitializer:Vt(this.movingMeanInitializer),movingVarianceInitializer:Vt(this.movingVarianceInitializer),betaRegularizer:It(this.betaRegularizer),gammaRegularizer:It(this.gammaRegularizer),betaConstraint:cn(this.betaConstraint),gammaConstraint:cn(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}iL.className="BatchNormalization",me(iL);class rL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.axis=e.axis==null?-1:e.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Ft(e.betaInitializer||"zeros"),this.gammaInitializer=Ft(e.gammaInitializer||"ones"),this.betaRegularizer=_t(e.betaRegularizer),this.gammaRegularizer=_t(e.gammaRegularizer),this.supportsMasking=!0}build(e){e=St(e);const t=e.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let i=0;i=t)throw new Error(`Invalid axis: ${i}`);if(this.axis.length!==$r(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map(i=>e[i]),s=!0;this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,s):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,s):this.beta=null,this.built=!0}call(e,t){const n=je(e),s=n.shape,i=s.length;return ee(()=>{const o=!0;let{mean:a,variance:c}=Ud(n,this.axis,o);const h=Oo(1,i);for(const L of this.axis)h[L]=s[L];const d=L=>L!=null&&L.shape.length!==i&&this.axis!==[i-1]?L.reshape(h):L;let m=d(this.gamma.read()),y=d(this.beta.read());const b=[],w=[];for(let L=0;L{if(e.rank!==3)throw new j(`temporalPadding expects input tensor to be 3-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new j(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);const n=[[0,0],t,[0,0]];return Ci(e,n)})}function E3(e,t,n){return ee(()=>{if(e.rank!==4)throw new j(`temporalPadding expects input tensor to be 4-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new j("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(n==null&&(n=ti()),n!=="channelsLast"&&n!=="channelsFirst")throw new j(`Unknown data format: ${n}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let s;return n==="channelsFirst"?s=[[0,0],[0,0],t[0],t[1]]:s=[[0,0],t[0],t[1],[0,0]],Ci(e,s)})}class oL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.dataFormat=e.dataFormat==null?ti():e.dataFormat,e.padding==null)this.padding=[[1,1],[1,1]];else if(typeof e.padding=="number")this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,e.padding.length!==2)throw new j(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if(typeof e.padding[0]=="number")t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,e.padding[0].length!==2)throw new j(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],e.padding[1].length!==2)throw new j(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new mn({ndim:4})]}computeOutputShape(e){e=St(e);let t,n;return this.dataFormat==="channelsFirst"?(e[2]!=null&&e[2]>=0?t=e[2]+this.padding[0][0]+this.padding[0][1]:t=null,e[3]!=null&&e[3]>=0?n=e[3]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],e[1],t,n]):(e[1]!=null&&e[1]>=0?t=e[1]+this.padding[0][0]+this.padding[0][1]:t=null,e[2]!=null&&e[2]>=0?n=e[2]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],t,n,e[3]])}call(e,t){return ee(()=>E3(je(e),this.padding,this.dataFormat))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}oL.className="ZeroPadding2D",me(oL);function Pp(e,t,n,s,i,o){return ee(()=>{Gt(i),qA(o),Ts(s),n==null&&(n=[1,1]),s==null&&(s="valid"),i==null&&(i=ti()),o==null&&(o="max"),e=kw(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=eh(e,t,n,c):a=Hl(e,t,n,c),i==="channelsFirst"&&(a=Me(a,[0,3,1,2])),a})}function rN(e,t,n,s,i,o){return ee(()=>{Gt(i),qA(o),Ts(s),n==null&&(n=[1,1,1]),s==null&&(s="valid"),i==null&&(i=ti()),o==null&&(o="max"),e=Zv(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=cb(e,t,n,c):a=jy(e,t,n,c),i==="channelsFirst"&&(a=Me(a,[0,4,1,2,3])),a})}class oN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=2),super(e),typeof e.poolSize=="number")this.poolSize=[e.poolSize];else if(Array.isArray(e.poolSize)&&e.poolSize.length===1&&typeof e.poolSize[0]=="number")this.poolSize=e.poolSize;else throw new j(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);if(pn(this.poolSize,"poolSize"),e.strides==null)this.strides=this.poolSize;else if(typeof e.strides=="number")this.strides=[e.strides];else if(Array.isArray(e.strides)&&e.strides.length===1&&typeof e.strides[0]=="number")this.strides=e.strides;else throw new j(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);pn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,Ts(this.padding),this.inputSpec=[new mn({ndim:3})]}computeOutputShape(e){e=St(e);const t=ai(e[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return ee(()=>{this.invokeCallHook(e,t),e=Ih(je(e),2);const n=this.poolingFunction(je(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return Fr(n,[2])})}getConfig(){const 
e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class aL extends oN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ts(s),Pp(e,t,n,s,i,"max")}}aL.className="MaxPooling1D",me(aL);class cL extends oN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ts(s),Pp(e,t,n,s,i,"avg")}}cL.className="AveragePooling1D",me(cL);class aN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==2)throw new j(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];pn(this.poolSize,"poolSize"),pn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ts(this.padding),this.inputSpec=[new mn({ndim:4})]}computeOutputShape(e){e=St(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2];return t=ai(t,this.poolSize[0],this.padding,this.strides[0]),n=ai(n,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(je(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class lL extends aN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ts(s),Pp(e,t,n,s,i,"max")}}lL.className="MaxPooling2D",me(lL);class hL extends aN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ts(s),Pp(e,t,n,s,i,"avg")}}hL.className="AveragePooling2D",me(hL);class cN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==3)throw new j(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];pn(this.poolSize,"poolSize"),pn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),Ts(this.padding),this.inputSpec=[new mn({ndim:5})]}computeOutputShape(e){e=St(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[4]:e[3];return t=ai(t,this.poolSize[0],this.padding,this.strides[0]),n=ai(n,this.poolSize[1],this.padding,this.strides[1]),s=ai(s,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return ee(()=>(this.invokeCallHook(e,t),this.poolingFunction(je(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class uL extends cN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return 
Gt(i),Ts(s),rN(e,t,n,s,i,"max")}}uL.className="MaxPooling3D",me(uL);class dL extends cN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return Gt(i),Ts(s),rN(e,t,n,s,i,"avg")}}dL.className="AveragePooling3D",me(dL);class lN extends lt{constructor(e){super(e);this.inputSpec=[new mn({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new ze}}class pL extends lN{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=je(e);return zt(n,1)})}}pL.className="GlobalAveragePooling1D",me(pL);class mL extends lN{constructor(e){super(e||{})}call(e,t){return ee(()=>{const n=je(e);return qn(n,1)})}}mL.className="GlobalMaxPooling1D",me(mL);class hN extends lt{constructor(e){super(e);this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,Gt(this.dataFormat),this.inputSpec=[new mn({ndim:4})]}computeOutputShape(e){return e=e,this.dataFormat==="channelsLast"?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new ze}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class fL extends hN{call(e,t){return ee(()=>{const n=je(e);return this.dataFormat==="channelsLast"?zt(n,[1,2]):zt(n,[2,3])})}}fL.className="GlobalAveragePooling2D",me(fL);class gL extends hN{call(e,t){return ee(()=>{const n=je(e);return this.dataFormat==="channelsLast"?qn(n,[1,2]):qn(n,[2,3])})}}gL.className="GlobalMaxPooling2D",me(gL);class uN extends lt{constructor(e){super(e);this.layer=e.layer}build(e){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(e){this.layer!=null&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=t.layer,i=oi(s,n);delete t.layer;const o={layer:i};return Object.assign(o,t),new e(o)}}class yL extends uN{constructor(e){super(e);this.supportsMasking=!0}build(e){if(e=St(e),e.length<3)throw new j(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){e=St(e);const t=[e[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return ee(()=>{e=je(e);const n=(o,a)=>{const c=je(this.layer.call(o,t));return[c,[]]},s=sN(n,e,[],!1,null,null,!1,!0),i=s[1];return i})}}yL.className="TimeDistributed",me(yL);function D3(e){za(PP,"BidirectionalMergeMode",e)}const k3="concat";class bL extends uN{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=oi(n),t.goBackwards=!(t.goBackwards===!0);const s={};if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=oi(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=e.mergeMode===void 0?k3:e.mergeMode,D3(this.mergeMode),e.weights)throw new ze("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,this.forwardLayer!=null&&(this.forwardLayer.trainable=e),this.backwardLayer!=null&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t=this.forwardLayer.computeOutputShape(e);Array.isArray(t)&&Array.isArray(t[0])||(t=[t]),t=t;let n,s,i;return this.returnState&&(i=t.slice(1)),n=t[0],n=n,this.mergeMode==="concat"?(n[n.length-1]*=2,s=[n]):this.mergeMode==null?s=[n,n.slice()]:s=[n],this.returnState?this.mergeMode==null?s.concat(i).concat(i.slice()):[n].concat(i).concat(i.slice()):jn(s)}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=nN(e,n,s,this.numConstants);if(e=i.inputs,n=i.initialState,s=i.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(n==null||n.length===0)&&s==null)return super.apply(e,t);const o=[],a=[];if(n!=null){const h=n.length;if(h%2>0)throw new j("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,o.push(...n);const d=n.map(m=>new mn({shape:m.shape}));this.forwardLayer.stateSpec=d.slice(0,h/2),this.backwardLayer.stateSpec=d.slice(h/2),a.push(...d)}if(s!=null)throw new ze("Support for constants in Bidirectional layers is not implemented yet.");const c=o[0]instanceof ri;for(const h of o)if(h instanceof ri!==c)throw new j("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const y=super.apply(h,t);return this.inputSpec=m,y}else return super.apply(e,t)}call(e,t){return ee(()=>{const n=t.initialState;let s,i;if(n==null)s=this.forwardLayer.call(e,t),i=this.backwardLayer.call(e,t);else{const c=n.slice(0,n.length/2),h=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:c})),i=this.backwardLayer.call(e,Object.assign(t,{initialState:h}))}let o;this.returnState&&(Array.isArray(s)&&(o=s.slice(1).concat(i.slice(1))),s=s[0],i=i[0]),this.returnSequences&&(i=Is(i,1));let a;return this.mergeMode==="concat"?a=Zb([s,i]):this.mergeMode==="sum"?a=be(s,i):this.mergeMode==="ave"?a=X(.5,be(s,i)):this.mergeMode==="mul"?a=X(s,i):this.mergeMode==null&&(a=[s,i]),this.returnState?this.mergeMode==null?a.concat(o):[a].concat(o):a})}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){Do(this.forwardLayer.name,()=>{this.forwardLayer.build(e)}),Do(this.backwardLayer.name,()=>{this.backwardLayer.build(e)}),this.built=!0}computeMask(e,t){Array.isArray(t)&&(t=t[0]);let n;if(this.returnSequences?this.mergeMode==null?n=[t,t]:n=t:this.mergeMode==null?n=[null,null]:n=null,this.returnState){const s=this.forwardLayer.states,i=s.map(o=>null);return Array.isArray(n)?n.concat(i).concat(i):[n].concat(i).concat(i)}else return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(e),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=oi(t.layer);if(delete t.layer,t.numConstants!=null)throw new ze("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new e(s)}}bL.className="Bidirectional",me(bL);function F3(e){return new Ya(e)}function _3(e){return new Ow(e)}function W3(e){return new Nw(e)}function $3(e){return new Cw(e)}function U3(e){return new Rw(e)}function B3(e){return new Dw(e)}function M3(e){return new Ew(e)}function P3(e){return new _p(e)}function z3(e){return new Eh(e)}function G3(e){return new Ww(e)}function V3(e){return new Fp(e)}function Y3(e){return new $w(e)}function H3(e){return new Uw(e)}function q3(e){return new Bw(e)}function j3(e){return new Mw(e)}function K3(e){return new jw(e)}function X3(e){return new Hw(e)}function J3(e){return new Mp(e)}function Z3(e){return new Yw(e)}function Q3(e){return new qw(e)}function eG(e){return new Kw(e)}function tG(e){return new Xw(e)}function nG(e){return new Jw(e)}function sG(e){return new Qw(e)}function iG(e){return new kh(e)}function rG(e){return new _h(e)}function oG(e){return new Uh(e)}function aG(e){return new Wh(e)}function cG(e){return new $h(e)}function lG(e){return new Fh(e)}function hG(e){return new eL(e)}function uG(e){return new iL(e)}function dG(e){return new rL(e)}function pG(e){return new oL(e)}function wL(e){return new cL(e)}function mG(e){return wL(e)}function fG(e){return wL(e)}function LL(e){return new hL(e)}function gG(e){return LL(e)}function yG(e){return LL(e)}function SL(e){return new dL(e)}function bG(e){return SL(e)}function wG(e){return SL(e)}function LG(e){return new pL(e)}function SG(e){return new fL(e)}function dN(e){return new mL(e)}function pN(e){return new gL(e)}function mN(e){return new aL(e)}function fN(e){return new lL(e)}function IG(e){return new uL(e)}function xG(e){return new zw(e)}function TG(e){return new $p(e)}function AG(e){return new Gw(e)}function vG(e){return new Dh(e)}function NG(e){return new Pw(e)}function CG(e){return new Wp(e)}function RG(e){return new Vw(e)}function OG(e){return new Bp(e)}function EG(e){return new Fi(e)}function DG(e){return new Up(e)}function kG(e){return new bL(e)}function FG(e){return new yL(e)}const _G=dN,WG=pN,$G=mN,UG=fN;function BG(e){return new tL(e)}function MG(e){return new nL(e)}function PG(e){return new sL(e)}function zG(e){return new Zw(e)}var 
GG=Object.freeze({__proto__:null,inputLayer:F3,elu:_3,reLU:W3,leakyReLU:$3,prelu:U3,softmax:B3,thresholdedReLU:M3,conv1d:P3,conv2d:z3,conv2dTranspose:G3,conv3d:V3,separableConv2d:Y3,cropping2D:H3,upSampling2d:q3,depthwiseConv2d:j3,activation:K3,dense:X3,dropout:J3,spatialDropout1d:Z3,flatten:Q3,repeatVector:eG,reshape:tG,permute:nG,embedding:sG,add:iG,average:rG,concatenate:oG,maximum:aG,minimum:cG,multiply:lG,dot:hG,batchNormalization:uG,layerNormalization:dG,zeroPadding2d:pG,averagePooling1d:wL,avgPool1d:mG,avgPooling1d:fG,averagePooling2d:LL,avgPool2d:gG,avgPooling2d:yG,averagePooling3d:SL,avgPool3d:bG,avgPooling3d:wG,globalAveragePooling1d:LG,globalAveragePooling2d:SG,globalMaxPooling1d:dN,globalMaxPooling2d:pN,maxPooling1d:mN,maxPooling2d:fN,maxPooling3d:IG,gru:xG,gruCell:TG,lstm:AG,lstmCell:vG,simpleRNN:NG,simpleRNNCell:CG,convLstm2d:RG,convLstm2dCell:OG,rnn:EG,stackedRNNCells:DG,bidirectional:kG,timeDistributed:FG,globalMaxPool1d:_G,globalMaxPool2d:WG,maxPool1d:$G,maxPool2d:UG,Layer:lt,RNN:Fi,RNNCell:Xa,input:Wv,gaussianNoise:BG,gaussianDropout:MG,alphaDropout:PG,masking:zG});function VG(e,t){return mw(e,t)}function YG(e,t){return yv(e,t)}function HG(e,t){return bv(e,t)}function qG(e,t){return fw(e,t)}function jG(e,t){return gw(e,t)}function KG(e,t){return gv(e,t)}function XG(e,t){return Wz(e,t)}function JG(e,t){return Np(e,t)}function ZG(e,t){return qa(e,t)}function QG(e,t){return Pr(e,t)}function eV(e,t){return Pr(e,t)}function tV(e,t){return Pr(e,t)}function nV(e,t){return tr(e,t)}function sV(e,t){return tr(e,t)}function iV(e,t){return tr(e,t)}var rV=Object.freeze({__proto__:null,binaryAccuracy:VG,binaryCrossentropy:YG,sparseCategoricalAccuracy:HG,categoricalAccuracy:qG,categoricalCrossentropy:jG,precision:KG,recall:XG,cosineProximity:JG,meanAbsoluteError:ZG,meanAbsolutePercentageError:QG,MAPE:eV,mape:tV,meanSquaredError:nV,MSE:sV,mse:iV});var oV=Object.freeze({__proto__:null,modelFromJSON:f3});function aV(e){return new Rh(e)}function cV(e){return x3(e)}function lV(e){return T3(e)}var hV=Object.freeze({__proto__:null,l1l2:aV,l1:cV,l2:lV});class gN extends Ha{constructor(){super(...arguments);this.model=null}setModel(e){if(!(e instanceof nr))throw new Error("model must be a LayersModel, not some other Container");this.model=e}}function zp(e,t){return et}class bN extends gN{constructor(e){super();if(e==null&&(e={}),e.restoreBestWeights)throw new ze("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=e.monitor||"val_loss",this.minDelta=Math.abs(e.minDelta||0),this.patience=e.patience||0,this.verbose=e.verbose||0,this.mode=e.mode||"auto",this.baseline=e.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=zp:this.mode==="max"?this.monitorFunc=yN:this.monitor.indexOf("acc")!==-1?this.monitorFunc=yN:this.monitorFunc=zp,this.monitorFunc===zp&&(this.minDelta*=-1)}async onTrainBegin(e){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===zp?Infinity:-Infinity}async onEpochEnd(e,t){await Mr(t);const n=this.getMonitorValue(t);if(n==null)return;this.monitorFunc(n-this.minDelta,this.best)?(this.best=n,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=e,this.model.stopTraining=!0))}async onTrainEnd(e){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(e){e==null&&(e={});const t=e[this.monitor];return t==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(e)}`),t}}function uV(e){return new bN(e)}const dV={earlyStopping:uV};var ci;(function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(ci||(ci={}));var wN;(function(e){let t;(function(n){n[n.LEGACY=0]="LEGACY",n[n.V1=1]="V1",n[n.V2=2]="V2"})(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))})(wN||(wN={}));const IL={};function pV(e,t){const n={tfOpName:e,category:"custom",inputs:[],attrs:[],customExecutor:t};IL[e]=n}function LN(e){return IL[e]}function mV(e){delete IL[e]}function R(e,t,n,s){const i=t.inputParams[e];if(i&&i.inputIndexStart!==void 0){const a=i.inputIndexStart,c=i.inputIndexEnd===0?void 0:i.inputIndexEnd===void 0?a+1:i.inputIndexEnd;if(i.type==="tensor")return Xn(t.inputNames[i.inputIndexStart],n,s);if(i.type==="tensors"){const m=t.inputNames.slice(a,c);return m.map(y=>Xn(y,n,s))}const h=Xn(t.inputNames.slice(a)[0],n,s),d=h.dataSync();return i.type==="number"?d[0]:ys(h.shape,d)}const o=t.attrParams[e];return o&&o.value}function Xn(e,t,n){const[s,i]=os(e),o=n.currentContextIds.find(a=>!!t[Gp(s,a)]);return o!==void 0?t[Gp(s,o)][i]:void 0}function fV(e,t,n){return t[Gp(e,n.currentContextId)]}function sr(e,t){const[n,s]=os(e);return[Gp(n,t&&t.currentContextId),s]}function Gp(e,t){return t?`${e}-${t}`:e}function os(e){const t=e.split(":");if(t.length===1)return[e,0];const n=t[0];return[n,Number(t[t.length-1])]}function GQ(e,t){const n=[];for(let s=0;sn.json));this.opMappers=t.reduce((n,s)=>(n[s.tfOpName]=s,n),{})}transformGraph(e,t={}){const 
n=e.node,s=[],i=[],o=[],a=n.reduce((L,T)=>(L[T.name]=this.mapNode(T),T.op.startsWith("Placeholder")?s.push(L[T.name]):T.op==="Const"?i.push(L[T.name]):(T.input==null||T.input.length===0)&&o.push(L[T.name]),L),{});let c=[];const h=[];let d={},m={};t!=null&&(d=this.mapSignatureEntries(t.inputs),m=this.mapSignatureEntries(t.outputs));const y=Object.keys(a);y.forEach(L=>{const T=a[L];T.inputNames.forEach(A=>{const[N]=sr(A);T.inputs.push(a[N]),a[N].children.push(T)})}),Object.keys(m).length===0?y.forEach(L=>{const T=a[L];T.children.length===0&&h.push(T)}):Object.keys(m).forEach(L=>{const[T]=sr(L),A=a[T];A!=null&&(A.signatureKey=m[L],h.push(A))}),Object.keys(d).length>0?Object.keys(d).forEach(L=>{const[T]=sr(L),A=a[T];A&&(A.signatureKey=d[L],c.push(A))}):c=s;let b={};e.library!=null&&e.library.function!=null&&(b=e.library.function.reduce((L,T)=>(L[T.signature.name]=this.mapFunction(T),L),{}));const w={nodes:a,inputs:c,outputs:h,weights:i,placeholders:s,signature:t,functions:b};return o.length>0&&(w.initNodes=o),w}mapSignatureEntries(e){return Object.keys(e||{}).reduce((t,n)=>(t[e[n].name]=n,t),{})}mapNode(e){const t=LN(e.op)||this.opMappers[e.op]||{};e.attr==null&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map(s=>s.startsWith("^")?s.substr(1):s),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return t.inputs!=null&&(n.inputParams=t.inputs.reduce((s,i)=>(s[i.name]={type:i.type,inputIndexStart:i.start,inputIndexEnd:i.end},s),{})),t.attrs!=null&&(n.attrParams=t.attrs.reduce((s,i)=>{const o=i.type;let a;switch(i.type){case"string":a=xL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=xL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"string[]":a=EL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=EL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number":a=AL(e.attr,i.tfName,i.defaultValue||0),a===void 0&&!!i.tfDeprecatedName&&(a=AL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number[]":a=OL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=OL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool":a=TL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=TL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool[]":a=kL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=kL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape":a=RL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=RL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape[]":a=DL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=DL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype":a=NL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=NL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype[]":a=CL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=CL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"func":a=xN(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=xN(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${i.type} for op: ${e.op}`)}return s[i.name]={value:a,type:o},s},{})),n}mapFunction(e){const t=e.nodeDef,n=[],s=[];let i={};t!=null&&(i=t.reduce((m,y)=>(m[y.name]=this.mapNode(y),y.op==="Const"&&s.push(m[y.name]),m),{}));const 
o=[],a=[];e.signature.inputArg.forEach(m=>{const[y]=sr(m.name),b={name:y,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:vL(m.type),type:"dtype"}},children:[]};b.signatureKey=m.name,o.push(b),i[y]=b});const c=Object.keys(i);c.forEach(m=>{const y=i[m];y.inputNames.forEach(b=>{const[w]=sr(b);y.inputs.push(i[w]),i[w].children.push(y)})});const h=e.ret;e.signature.outputArg.forEach(m=>{const[y,b]=sr(h[m.name]),w=i[y];w!=null&&(w.defaultOutput=b,a.push(w))});const d=this.mapArgsToSignature(e);return{nodes:i,inputs:o,outputs:a,weights:s,placeholders:n,signature:d}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n),t),{}),outputs:e.signature.outputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t),{})}}mapArgToTensorInfo(e,t){let n=e.name;return t!=null&&(n=t[n]),{name:n,dtype:e.type}}}function jV(e){const t=C().global;if(typeof t.atob!="undefined")return t.atob(e);if(typeof Buffer!="undefined")return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function IN(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):jV(e);return t?n:n.toLowerCase()}function xL(e,t,n,s=!1){const i=e[t];return i!=null?IN(i.s,s):n}function TL(e,t,n){const s=e[t];return s?s.b:n}function AL(e,t,n){const s=e[t]||{},i=s.i!=null?s.i:s.f!=null?s.f:n;return typeof i=="number"?i:parseInt(i,10)}function vL(e){typeof e=="string"&&(e=ci[e]);switch(e){case ci.DT_FLOAT:return"float32";case ci.DT_INT32:case ci.DT_INT64:case ci.DT_INT8:case ci.DT_UINT8:return"int32";case ci.DT_BOOL:return"bool";case ci.DT_DOUBLE:return"float32";case ci.DT_STRING:return"string";default:return null}}function xN(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function NL(e,t,n){const s=e[t];return s&&s.type?vL(s.type):n}function CL(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map(i=>vL(i)):n}function TN(e){return e.unknownRank?void 0:e.dim!=null?e.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function RL(e,t,n){const s=e[t];return s&&s.shape?TN(s.shape):n}function OL(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map(i=>typeof i=="number"?i:parseInt(i,10)):n}function EL(e,t,n,s=!1){const i=e[t];return i&&i.list&&i.list.s?i.list.s.map(o=>IN(o,s)):n}function DL(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map(i=>TN(i)):n}function kL(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class KV{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map(s=>this.getInput(s)),e.rawAttrs!=null&&(this.attrs=Object.keys(e.rawAttrs).reduce((s,i)=>(s[i]=this.getAttr(i),s),{}))}getInput(e){return Xn(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(n.tensor!=null)return Xn(e,this.tensorMap,this.context);if(n.i!=null||n.f!=null)return AL(this.node.rawAttrs,e,t);if(n.s!=null)return xL(this.node.rawAttrs,e,t);if(n.b!=null)return TL(this.node.rawAttrs,e,t);if(n.shape!=null)return RL(this.node.rawAttrs,e,t);if(n.type!=null)return NL(this.node.rawAttrs,e,t);if(n.list!=null){if(n.list.i!=null||n.list.f!=null)return OL(this.node.rawAttrs,e,t);if(n.list.s!=null)return EL(this.node.rawAttrs,e,t);if(n.list.shape!=null)return DL(this.node.rawAttrs,e,t);if(n.list.b!=null)return kL(this.node.rawAttrs,e,t);if(n.list.type!=null)return 
CL(this.node.rawAttrs,e,t)}return t}}const XV=(e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[be(R("a",e,t,n),R("b",e,t,n))];case"AddN":return[vT(R("tensors",e,t,n))];case"FloorMod":case"Mod":return[$d(R("a",e,t,n),R("b",e,t,n))];case"Mul":return[X(R("a",e,t,n),R("b",e,t,n))];case"RealDiv":case"Div":return[_e(R("a",e,t,n),R("b",e,t,n))];case"DivNoNan":return[tb(R("a",e,t,n),R("b",e,t,n))];case"FloorDiv":return[Id(R("a",e,t,n),R("b",e,t,n))];case"Sub":return[Ce(R("a",e,t,n),R("b",e,t,n))];case"Minimum":return[Io(R("a",e,t,n),R("b",e,t,n))];case"Maximum":return[_s(R("a",e,t,n),R("b",e,t,n))];case"Pow":return[ei(R("a",e,t,n),R("b",e,t,n))];case"SquaredDifference":return[ah(R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},VQ="arithmetic";const JV=(e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[sn(R("x",e,t,n))];case"Acos":return[$y(R("x",e,t,n))];case"Acosh":return[Uy(R("x",e,t,n))];case"Asin":return[Py(R("x",e,t,n))];case"Asinh":return[zy(R("x",e,t,n))];case"Atan":return[Gy(R("x",e,t,n))];case"Atan2":return[Vy(R("x",e,t,n),R("y",e,t,n))];case"Atanh":return[Yy(R("x",e,t,n))];case"Ceil":return[Xy(R("x",e,t,n))];case"Complex":return[xi(R("real",e,t,n),R("imag",e,t,n))];case"Cos":return[Kl(R("x",e,t,n))];case"Cosh":return[Rd(R("x",e,t,n))];case"Elu":return[So(R("x",e,t,n))];case"Erf":return[nb(R("x",e,t,n))];case"Exp":return[Ls(R("x",e,t,n))];case"Expm1":return[sb(R("x",e,t,n))];case"Floor":return[Ra(R("x",e,t,n))];case"Log":return[ts(R("x",e,t,n))];case"Log1p":return[kd(R("x",e,t,n))];case"Imag":return[Ea(R("x",e,t,n))];case"Neg":return[Pt(R("x",e,t,n))];case"Reciprocal":return[db(R("x",e,t,n))];case"Real":return[xo(R("x",e,t,n))];case"Relu":return[Ri(R("x",e,t,n))];case"Round":return[mb(R("x",e,t,n))];case"Selu":return[Pd(R("x",e,t,n))];case"Sigmoid":return[vi(R("x",e,t,n))];case"Sin":return[zd(R("x",e,t,n))];case"Sign":return[gb(R("x",e,t,n))];case"Sinh":return[Gd(R("x",e,t,n))];case"Softplus":return[Da(R("x",e,t,n))];case"Sqrt":return[Ln(R("x",e,t,n))];case"Square":return[wt(R("x",e,t,n))];case"Tanh":return[Ca(R("x",e,t,n))];case"Tan":return[wb(R("x",e,t,n))];case"Relu6":case"ClipByValue":return[Yn(R("x",e,t,n),R("clipValueMin",e,t,n),R("clipValueMax",e,t,n))];case"Rsqrt":return[Md(Xn(e.inputNames[0],t,n))];case"Prod":return[Bd(R("x",e,t,n),R("axes",e,t,n))];case"LeakyRelu":return[Dd(R("x",e,t,n),R("alpha",e,t,n))];case"Prelu":return[nh(R("x",e,t,n),R("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},YQ="basic_math";function Ms(e,t,n=""){k(ZV(e,t),()=>n+` Shapes ${e} and ${t} must match`)}function ZV(e,t){if(e.length!==t.length)return!1;for(let n=0;n{(e==null||!e.has(t.tensor.id))&&t.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map(t=>this.read(t))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is 
not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, - because the value dtype is ${t.dtype}, but TensorArray dtype is ${this.dtype}.`);if(this.size()===0&&(this.elementShape==null||this.elementShape.length===0)&&(this.elementShape=t.shape),Ms(this.elementShape,t.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${e}.`),n.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been read.`);if(n.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been written.`);n.tensor=t,Nn(t),n.written=!0,this.tensors[e]=n}writeMany(e,t){if(e.length!==t.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${e.length} is not the same as tensors size: ${t.length}.`);e.forEach((n,s)=>this.write(n,t[s]))}gather(e,t){if(!!t&&t!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${t}`);if(e)e=e.slice(0,this.size());else{e=[];for(let s=0;s=this.maxSize)throw new Error(`Max index must be < array size (${n} vs. ${this.maxSize})`);this.writeMany(e,Oi(t,0))}split(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);let n=0;const s=e.map(c=>(n+=c,n));if(n!==t.shape[0])throw new Error(`Expected sum of lengths to be equal to +2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(h!=null){const d={};for(const w of Object.keys(Bs))d[w]=Bs[w];for(const w of Object.keys(n))d[w]=n[w];const m=o.config;m.customObjects=d;const f=Object.assign({},Bs);for(const w of Object.keys(n))Bs[w]=n[w];pw(o.config);const b=h(c,o.config,n,i);return Bs=Object.assign({},f),b}else{const d=Object.assign({},Bs);for(const f of Object.keys(n))Bs[f]=n[f];const m=new c(o.config);return Bs=Object.assign({},d),m}}}function dz(e,t){return et?1:0}function Bp(e,t){return-1*dz(e,t)}function Oee(e){switch(e){case"float32":return"float32";default:throw new q(`Invalid dtype: ${e}`)}}function Eee(e,t){if(e==null||t==null)return e===t;if(e.length!==t.length)return!1;for(let n=0;n=0),As(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every(i=>typeof i===t)}function wn(e,t){Array.isArray(e)?(A(e.length>0,()=>`${t} is unexpectedly an empty array.`),e.forEach((n,s)=>wn(n,`element ${s+1} of ${t}`))):A(Number.isInteger(e)&&e>0,()=>`Expected ${t} to be a positive integer, but got ${av(e)}.`)}function av(e){return e===null?"null":Array.isArray(e)?"["+e.map(t=>av(t)).join(",")+"]":typeof e=="string"?`"${e}"`:`${e}`}function mz(e,t){let n=jn(),s;const i=(...o)=>{const a=jn();return a-n0,"arrayOfValues is empty");for(const t of e)As(Array.isArray(t),"one of the values is not an array"),As(t.length>0,"one of the values is empty");return e.reduce((t,n)=>t.length===0?n.map(s=>[s]):n.map(s=>t.map(i=>[...i,s])).reduce((s,i)=>s.concat(i),[]),[])}function fw(e,t){return Q(()=>Nn($e(X(e,e),t,!0)))}class Fh extends Ao{getConfig(){return{}}}class gw extends Fh{constructor(e){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>{const t=fw(e,this.axis),n=Jn(t,0,this.maxValue);return 
X(e,We(n,be(mn(),t)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}gw.className="MaxNorm",fe(gw);class yw extends Fh{constructor(e){super();this.defaultAxis=0,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>We(e,be(mn(),fw(e,this.axis))))}getConfig(){return{axis:this.axis}}}yw.className="UnitNorm",fe(yw);class bw extends Fh{apply(e){return Ni(e)}}bw.className="NonNeg",fe(bw);class ww extends Fh{constructor(e){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=e.minValue!=null?e.minValue:this.defaultMinValue,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.rate=e.rate!=null?e.rate:this.defaultRate,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>{const t=fw(e,this.axis),n=be(X(this.rate,Jn(t,this.minValue,this.maxValue)),X(1-this.rate,t));return X(e,We(n,be(mn(),t)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}ww.className="MinMaxNorm",fe(ww);const lv={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function fn(e){return dw(e)}function hv(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"constraint")}function gn(e){if(e==null)return null;if(typeof e=="string"){const t=e in lv?lv[e]:e,n={className:t,config:{}};return hv(n)}else return e instanceof Fh?e:hv(e)}function fz(e){return new gw(e)}function gz(e){return new yw(e)}function yz(){return new bw}function bz(e){return new ww(e)}var wz=Object.freeze({__proto__:null,maxNorm:fz,unitNorm:gz,nonNeg:yz,minMaxNorm:bz});const Lz=["channelsFirst","channelsLast"],Sz=["valid","same","causal"],Iz=["max","avg"],xz=["sum","mul","concat","ave"],kee=["temporal"];const ec=new Map;function jt(e){Qa(Lz,"DataFormat",e)}function vs(e){Qa(Sz,"PaddingMode",e)}function uv(e){Qa(Iz,"PoolMode",e)}const _h=[],dv="/";function Bo(e,t){_h.push(e);try{const n=t();return _h.pop(),n}catch(n){throw _h.pop(),n}}function Tz(){return _h.length===0?"":_h.join(dv)+dv}function pv(e){if(!fv(e))throw new Error("Not a valid tensor name: '"+e+"'");return Tz()+e}function mv(e){if(!fv(e))throw new Error("Not a valid tensor name: '"+e+"'");ec.has(e)||ec.set(e,0);const t=ec.get(e);if(ec.set(e,ec.get(e)+1),t>0){const n=`${e}_${t}`;return ec.set(n,1),n}else return e}const Az=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function fv(e){return!!e.match(Az)}function vz(e){return e===parseInt(e.toString(),10)}function Gr(e,t,n){t==null&&(t=0),n==null&&(n=e.length);let s=1;for(let i=t;ii-o),n=Math.floor((t.length-1)/2),s=Math.ceil((t.length-1)/2);return n===s?t[n]:(t[n]+t[s])/2}function ni(e,t){if(t0?t.reduce((n,s)=>n*s):1}function Wh(e,t){return e.asType(t)}function $h(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function Rz(e,t){return Q(()=>{if(e.shape.length!==2)throw new q(`repeat() expects a rank-2 tensor, but received a rank-${e.shape.length} tensor.`);const n=$h(e,1);return Iw(n,[1,t,1])})}function Oz(e){const t=[Gr(e.shape)];return e.reshape(t)}function Ez(e){if(e.rank<=1)throw new q(`batchFlatten requires a minimum rank of 2. 
Got rank: ${e.rank}.`);const t=[e.shape[0],Gr(e.shape,1)];return e.reshape(t)}function Mo(e,t,n){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:return Mb(e,[t,0],[n,e.shape[1]]);case 3:return Ip(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return wh(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return tt(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return tt(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new q(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Lw(e,t,n){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:return Mb(e,[0,t],[e.shape[0],n]);case 3:return Ip(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return wh(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new q(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Pp(e,t,n,s){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:switch(s){case 1:return Mo(e,t,n);case 2:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return Mo(e,t,n);case 2:return Ip(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return Mo(e,t,n);case 2:return wh(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return wh(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}default:throw new q(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Sw(e,t=-1){let n;return t<0&&(n=e[0].rank,n!==0?t=n:t=0),t===e[0].rank&&(t=-1),Yt(e,t)}function yv(e,t){switch(e.rank){case 1:return XT([e,t]);case 2:return JT([e,t],0);case 3:return ZT([e,t],0);case 4:return QT([e,t],0);default:throw new q(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function Iw(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new q(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return $r(e,t)}function zp(e,t=0,n=1,s,i){return Fb(e,t,n,s,i)}function Ci(e,t,n,s){if(e.rank<2||t.rank<2)throw new Pe(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3){const i=e.shape.slice(-1)[0],o=t.shape.slice(-2)[0];if(i!==o)throw new Pe(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${t.shape}`)}if(e.rank===2&&t.rank===2){const i=!1,o=!1;return Ep({a:e,b:t,transposeA:i,transposeB:o,bias:s?xw(e.rank,s,ei()):null,activation:n})}else{const i=e.shape.slice(),o=i.pop();e=e.reshape([-1,o]);const a=t.shape.slice(),c=a.pop(),h=a.pop(),d=[...a,c],m=Array.from({length:t.rank},(L,x)=>x===0?t.rank-2:x<=t.rank-2?x-1:x);t=t.transpose(m).reshape([h,-1]);const f=[...i,...d],b=!1,w=!1;return Ep({a:e,b:t,transposeA:b,transposeB:w,bias:s?xw(e.rank,s,ei()):null,activation:n}).reshape(f)}}function Mee(e){return Q(()=>{const t=et(e),n=Fn(e);return Bn(Xs(e,t),t,Bn(xs(e,et(e)),n,X(-1,n)))})}function Pee(e,t){return Q(()=>{if(e.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return e=e.toInt(),To(e,t).toFloat()})}function bv(e,t,n){return Q(()=>(Array.isArray(t)?t=ls(t,"int32"):t=t.toInt(),Pa(e,t,n)))}function Uh(e){return X(e,e)}function zee(e,t){return Q(()=>{if(typeof 
t=="number"&&(t=Ce(Math.round(t),"int32")),t.dtype!=="int32")throw new Pe(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return Zs(e,t)})}function xw(e,t,n){const s=t.shape;if(t.rank!==1&&t.rank!==e)throw new q(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(e===5){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===4){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===3){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new q(`Unsupported input rank by biasAdd: ${t.rank}`)}function Ri(e,t,n){return Q(()=>(n==null&&(n=ei()),jt(n),e.add(xw(e.rank,t,n))))}function Dz(e,t=1){if(t!==1)throw new Pe(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return Ua(e)}function kz(e){return Q(()=>We(e,dn(e).add(1)))}function wv(e,t,n,s){return Q(()=>kA(e,t,n,s))}function Fz(e){return Q(()=>{const t=be(.5,X(.2,e));return Jn(t,0,1)})}function Bh(e,t,n=!1){return n?e():t()}const _z=["fanIn","fanOut","fanAvg"],Wz=["normal","uniform","truncatedNormal"],Vee=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function $z(e){Qa(_z,"FanMode",e)}function Uz(e){Qa(Wz,"Distribution",e)}class Ms extends Ao{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class Tw extends Ms{apply(e,t){return dt(e,t)}}Tw.className="Zeros",fe(Tw);class Vp extends Ms{apply(e,t){return Js(e,t)}}Vp.className="Ones",fe(Vp);class Aw extends Ms{constructor(e){super();if(typeof e!="object")throw new q(`Expected argument of type ConstantConfig but got ${e}`);if(e.value===void 0)throw new q(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return Q(()=>X(Ce(this.value),Js(e,t)))}getConfig(){return{value:this.value}}}Aw.className="Constant",fe(Aw);class vw extends Ms{constructor(e){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return ko(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}vw.className="RandomUniform",fe(vw);class Nw extends Ms{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`randomNormal does not support dType ${t}.`);return zp(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Nw.className="RandomNormal",fe(Nw);class Cw extends Ms{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`truncatedNormal does not support dType ${t}.`);return xh(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Cw.className="TruncatedNormal",fe(Cw);class Rw extends 
Ms{constructor(e){super();this.gain=e.gain!=null?e.gain:1}apply(e,t){return Q(()=>{if(e.length!==2||e[0]!==e[1])throw new q("Identity matrix initializer can only be used for 2D square matrices.");return X(this.gain,cp(e[0]))})}getConfig(){return{gain:this.gain}}}Rw.className="Identity",fe(Rw);function Bz(e,t="channelsLast"){let n,s;if(jt(t),e.length===2)n=e[0],s=e[1];else if([3,4,5].indexOf(e.length)!==-1){if(t==="channelsFirst"){const i=Gr(e,2);n=e[1]*i,s=e[0]*i}else if(t==="channelsLast"){const i=Gr(e,0,e.length-2);n=e[e.length-2]*i,s=e[e.length-1]*i}}else{const i=Gr(e);n=Math.sqrt(i),s=Math.sqrt(i)}return[n,s]}class ns extends Ms{constructor(e){super();if(e.scale<0)throw new q(`scale must be a positive float. Got: ${e.scale}`);this.scale=e.scale==null?1:e.scale,this.mode=e.mode==null?"fanIn":e.mode,$z(this.mode),this.distribution=e.distribution==null?"normal":e.distribution,Uz(this.distribution),this.seed=e.seed}apply(e,t){const n=Bz(e),s=n[0],i=n[1];let o=this.scale;if(this.mode==="fanIn"?o/=Math.max(1,s):this.mode==="fanOut"?o/=Math.max(1,i):o/=Math.max(1,(s+i)/2),this.distribution==="normal"){const a=Math.sqrt(o);if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`${this.getClassName()} does not support dType ${t}.`);return xh(e,0,a,t,this.seed)}else{const a=Math.sqrt(3*o);return ko(e,-a,a,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}ns.className="VarianceScaling",fe(ns);class Gp extends ns{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Gp.className="GlorotUniform",fe(Gp);class Yp extends ns{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Yp.className="GlorotNormal",fe(Yp);class Hp extends ns{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Hp.className="HeNormal",fe(Hp);class qp extends ns{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}qp.className="HeUniform",fe(qp);class jp extends ns{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}jp.className="LeCunNormal",fe(jp);class Kp extends ns{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Kp.className="LeCunNormal",fe(Kp);class Ow extends Ms{constructor(e){super();if(this.DEFAULT_GAIN=1,this.gain=e.gain==null?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,this.seed!=null)throw new Pe("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return Q(()=>{if(e.length<2)throw new Pe("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const n=e[0]>e[1]?[e[1],e[0]]:e,s=zp(n,0,1,"float32");let i=GA.gramSchmidt(s);return e[0]>e[1]&&(i=i.transpose()),X(this.gain,i)})}getConfig(){return{gain:this.gain,seed:this.seed}}}Ow.className="Orthogonal",fe(Ow);const 
Lv={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function Sv(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"initializer")}function Kt(e){return dw(e)}function Pt(e){if(typeof e=="string"){const t=e in Lv?Lv[e]:e;if(t==="GlorotNormal")return new Yp;if(t==="GlorotUniform")return new Gp;if(t==="HeNormal")return new Hp;if(t==="HeUniform")return new qp;if(t==="LeCunNormal")return new jp;if(t==="LeCunUniform")return new Kp;{const n={};return n.className=t,n.config={},Sv(n)}}else return e instanceof Ms?e:Sv(e)}function Mz(){return new Tw}function Pz(){return new Vp}function zz(e){return new Aw(e)}function Vz(e){return new vw(e)}function Gz(e){return new Nw(e)}function Yz(e){return new Cw(e)}function Hz(e){return new Rw(e)}function qz(e){return new ns(e)}function jz(e){return new Gp(e)}function Kz(e){return new Yp(e)}function Xz(e){return new Hp(e)}function Jz(e){return new qp(e)}function Zz(e){return new jp(e)}function Qz(e){return new Kp(e)}function e3(e){return new Ow(e)}var t3=Object.freeze({__proto__:null,zeros:Mz,ones:Pz,constant:zz,randomUniform:Vz,randomNormal:Gz,truncatedNormal:Yz,identity:Hz,varianceScaling:qz,glorotUniform:jz,glorotNormal:Kz,heNormal:Xz,heUniform:Jz,leCunNormal:Zz,leCunUniform:Qz,orthogonal:e3});let n3=0;function Iv(){return n3++}const Xp={};function Jp(e=""){return e in Xp||(Xp[e]=0),Xp[e]+=1,e+Xp[e].toString()}function Ew(e){return Array.isArray(e)&&Array.isArray(e[0])}function Zp(e){return e.length===0?[]:Array.isArray(e[0])?e:[e]}function Xe(e){let t;if(Array.isArray(e)){if(e.length!==1)throw new q(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function Nt(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(e.length===1)return e=e,e[0];throw new q(`Expected exactly 1 Shape; got ${e.length}`)}else return e}function Qp(e){let t=0;for(const n of e)n.shape.length===0?t+=1:t+=n.shape.reduce((s,i)=>s*i);return t}const xv="Variable";class si{constructor(e,t="float32",n=xv,s=!0,i=null){this.dtype=t==null?"float32":t,this.shape=e.shape,this.id=Iv(),n=n==null?xv:n,this.originalName=pv(n),this.name=mv(this.originalName),this.trainable_=s,this.constraint=i,this.val=mA(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),s3(this.val,e),this.val.id!==e.id&&(this.val.assign(e),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function s3(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}function Gee(e,t,n,s){return new si(e,t,n,!0,s)}function Yee(e,t,n){return new si(dt(e),t,n)}function Hee(e,t,n){return new si(et(e),t,n)}function qee(e,t,n){const s=Js(e);return new si(s,t,n)}function jee(e,t,n){const s=Fn(e);return new si(s,t,n)}function Kee(e,t,n){return new si(cp(e),t,n)}function Xee(e,t,n,s,i,o="randomUniform"){return new si(ko(e,t,n,s),s,o)}function Jee(e,t=0,n=1,s,i,o="truncatedNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Pe(`randomNormal does not support dType ${s}.`);return new si(xh(e,t,n,s,i),s,o)}function Zee(e,t=0,n=1,s,i,o="randomNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Pe(`randomNormalVariable does not support dType ${s}.`);return new si(Fb(e,t,n,s,i),s,o)}function Qee(e,t){return e.write(t)}function ete(e,t){return e.write(be(e.read(),t))}function tte(e,t){return e.write(Re(e.read(),t))}function Dw(e){return e.map(t=>t.read())}function kw(e){e.forEach(t=>{const n=t[0];n.write(t[1])})}function nte(e,t){const n=t.map(i=>i.read()),s=Cb(e,n);return t.map(i=>s.grads[i.name])}class Ln{constructor(e){this.dtype=e.dtype,this.shape=e.shape,e.shape!=null?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class ii{constructor(e,t,n,s,i,o,a){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=i,this.outputTensorIndex=a,this.id=Iv(),o!=null&&(this.originalName=pv(o),this.name=mv(this.originalName)),this.rank=t.length}}let i3=0;class em{constructor(e,t){this.callArgs=t,this.id=i3++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const n of e.inboundLayers)n!=null&&n.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)t!=null?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let r3=0;class lt extends Ao{constructor(e={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=r3++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const n=this.getClassName();t=sr(n)+"_"+Jp(n)}if(this.name=t,this.trainable_=e.trainable==null?!0:e.trainable,e.inputShape!=null||e.batchInputShape!=null){let n;if(e.batchInputShape!=null)n=e.batchInputShape;else if(e.inputShape!=null){let i=null;e.batchSize!=null&&(i=e.batchSize),n=[i].concat(e.inputShape)}this.batchInputShape=n;let s=e.dtype;s==null&&(s=e.inputDType),s==null&&(s="float32"),this.dtype=s}e.weights!=null?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(this.inboundNodes.length===0)throw new ti(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new q(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return 
ts(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return ts(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new nr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new nr(`Layer ${this.name} is not connected, no input to return.`);return ts(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new nr(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new nr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return ts(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(e=>e())}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach(t=>t.trainable=e),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(e=>e.trainable):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(e=>!e.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Et(e),this.inputSpec==null||this.inputSpec.length===0)return;const t=Et(this.inputSpec);if(e.length!==t.length)throw new q(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. 
Input received: ${e}`);for(let n=0;ni.maxNDim)throw new q(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${i.maxNDim}, found ndim=${o}`);if(i.minNDim!=null&&o=0?a[h]:a[a.length+h];if(d!=null&&[d,null].indexOf(m)===-1)throw new q(`Input ${n} is incompatible with layer ${this.name}: expected axis ${h} of input shape to have value ${d} but got shape ${a}.`)}}if(i.shape!=null)for(let a=0;a{if(!this.built){this.assertInputCompatibility(e);const o=[];for(const a of Et(e))o.push(a.shape);this.build(ts(o)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&i&&(this._refCount=1)}if(this.assertInputCompatibility(e),i){let o=this.call(e,t);const a=Et(o),c=[];for(let h of a)n.indexOf(h)!==-1&&(h=h.clone()),c.push(h);if(o=ts(c),this.activityRegularizer!=null)throw new Pe("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return o}else{const o=o3(e),a=this.computeOutputShape(o);let c;const h=a3(e);if(this.warnOnIncompatibleInputShape(Array.isArray(e)?o[0]:o),a!=null&&a.length>0&&Array.isArray(a[0])?c=a.map((d,m)=>new ii(h,d,this,Et(e),t,this.name,m)):c=new ii(h,a,this,Et(e),t,this.name),this.addInboundNode(e,c,null,null,o,a,t),this._refCount++,this.activityRegularizer!=null)throw new Pe("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}})}warnOnIncompatibleInputShape(e){if(this.batchInputShape==null)return;if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach((n,s)=>{n!=null&&e[s]!=null&&e[s]!==n&&(t=!0)}),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new nr(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);e.indexOf(n)===-1&&e.push(n)}if(e.length===1){const t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&t.length===1?t[0]:t}else throw new nr(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new ti(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Qp(this.weights)}build(e){this.built=!0}getWeights(e=!1){return Dw(e?this.trainableWeights:this.weights)}setWeights(e){Q(()=>{const t=this.weights;if(t.length!==e.length)throw new q(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(t.length===0)return;const n=[],s=Dw(t);for(let i=0;ii.apply(h.read())),o==null&&(o=!0),o?this._trainableWeights.push(h):this._nonTrainableWeights.push(h),h}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){if(e==null||Array.isArray(e)&&e.length===0)return;e=Et(e),this._losses!==void 0&&this._losses!==null&&this.losses.push(...e)}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(t!=null)if(Array.isArray(t))t.forEach(n=>{if(n!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return t}addInboundNode(e,t,n,s,i,o,a=null){const c=Et(e);t=Et(t),n=Et(n),s=Et(s),i=Zp(i),o=Zp(o);const h=[],d=[],m=[];for(const f of c)h.push(f.sourceLayer),d.push(f.nodeIndex),m.push(f.tensorIndex);new em({outboundLayer:this,inboundLayers:h,nodeIndices:d,tensorIndices:m,inputTensors:c,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:i,outputShapes:o},a);for(let f=0;fe.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return--this._refCount===0&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function o3(e){e=Et(e);const t=[];for(const n of e)t.push(n.shape);return ts(t)}function a3(e){return"float32"}function Tv(e,t,n){if((t==null||n!=null&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),t.inboundNodes.length===0)return[e];{const s=t.inboundNodes[n];if(s.inboundLayers.length===0)return s.inputTensors;{const i=[];for(let o=0;o0){const i=await Promise.all(t);for(let o=0;obe(this.totals[s],X(i,n)));this.totals[s]=a,o!=null&&o.dispose()}}}async onEpochEnd(e,t){if(t!=null)for(const n of this.params.metrics){if(this.totals[n]==null)continue;typeof this.totals[n]=="number"?t[n]=this.totals[n]/this.seen:Q(()=>{const s=X(We(1,this.seen),this.totals[n]);t[n]=s,this.totals[n].dispose(),bn(t[n])})}}}class Rv extends sc{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){t==null&&(t={}),this.epoch.push(e);for(const n in t)this.history[n]==null&&(this.history[n]=[]),this.history[n].push(t[n])}async syncData(){const e=[],t=[],n=[];for(const i in this.history){const o=this.history[i];for(let a=0;anew Ov(s,t))}class Ps{constructor(){}static registerCallbackConstructor(e,t){A(e>=0&&Number.isInteger(e),()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`),Ps.checkForDuplicate(t),Ps.constructors[e]==null&&(Ps.constructors[e]=[]),Ps.constructors[e].push(t)}static checkForDuplicate(e){for(const t in Ps.constructors){const n=Ps.constructors[+t];n.forEach(s=>{if(s===e)throw new q("Duplicate callback constructor.")})}}static clear(){Ps.constructors={}}static createCallbacks(e){const t=[];for(const n in Ps.constructors){const s=+n;e>=s&&t.push(...Ps.constructors[s])}return t.map(n=>new n)}}Ps.constructors={};function Dv(e,t,n,s,i,o,a,c,h){const d=new Rv,m=[new l3,...Ps.createCallbacks(t)];e!=null&&m.push(...e),m.push(d);const f=new Cv(m);return f.setParams({epochs:n,initialEpoch:s,samples:i,steps:o,batchSize:a,verbose:t,doValidation:c,metrics:h}),{callbackList:f,history:d}}function 
ri(e,t={},n=!1){return kh(e,Ws.getMap().classNameMap,t,"layer",n)}function tm(e,t){return Q(()=>{e.dtype!=="float32"&&(e=e.asType("float32"));const n=$e(Uh(e),t,!0),s=Ba(n.shape,mn()),i=Nn($s(n,s));return We(e,i)})}function ir(e,t){return Q(()=>qt(Uh(Re(t,e)),-1))}function ic(e,t){return Q(()=>qt(dn(Re(t,e)),-1))}function qr(e,t){return Q(()=>{const n=Re(e,t),s=Jn(dn(e),mn(),Number.MAX_VALUE),i=dn(We(n,s));return X(100,qt(i,-1))})}function Fw(e,t){return Q(()=>{const n=Jn(t,mn(),Number.MAX_VALUE),s=cs(be(1,n)),i=Jn(e,mn(),Number.MAX_VALUE),o=cs(be(1,i));return qt(Uh(Re(s,o)),-1)})}function h3(e,t){return Q(()=>{const n=$s(0,Re(1,X(e,t)));return qt(Uh(n),-1)})}function u3(e,t){return Q(()=>{const n=$s(0,Re(1,X(e,t)));return qt(n,-1)})}function d3(e,t){return Q(()=>{const n=$e(X(e,t),-1),s=Qn(X(Re(1,e),t),-1);return $s(0,be(1,Re(s,n)))})}function p3(e,t){return Q(()=>{const n=Math.log(2),s=Re(t,e),i=Re(be(s,za(X(-2,s))),n);return qt(i,-1)})}function Mh(e,t,n=!1){return Q(()=>{if(n)t=Fo(t);else{const s=$e(t,t.shape.length-1,!0);t=We(t,s)}return t=Jn(t,mn(),1-mn()),Ht($e(X(e.toFloat(),cs(t)),t.shape.length-1))})}function nm(e,t,n=!1){return Q(()=>{const s=Ma(Oz(e)).toInt();t=Jn(t,mn(),1-mn());const i=t.shape,o=To(s,i[i.length-1]).reshape(i);return Mh(o,t,n)})}function m3(e,t){if(!ae(e.shape,t.shape))throw new q(`logits and labels must have the same shape, but got shapes ${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return Q(()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())})}function sm(e,t){return Q(()=>{let n;return n=Jn(t,mn(),1-mn()),n=cs(We(n,Re(1,n))),qt(m3(e,n),-1)})}function _w(e,t){return Q(()=>{const n=Jn(e,mn(),1),s=Jn(t,mn(),1);return $e(X(e,cs(We(n,s))),-1)})}function f3(e,t){return Q(()=>{const n=cs(be(mn(),t));return qt(Re(t,X(e,n)),-1)})}function im(e,t){return Q(()=>{const n=tm(e,-1),s=tm(t,-1),i=X(n,s);return Ht($e(i,-1))})}const ste=ir,ite=ir,rte=ic,ote=ic,ate=qr,cte=qr,lte=Fw,hte=Fw,ute=_w,dte=_w,pte=im,rm={meanSquaredError:ir,meanAbsoluteError:ic,meanAbsolutePercentageError:qr,meanSquaredLogarithmicError:Fw,squaredHinge:h3,hinge:u3,categoricalHinge:d3,logcosh:p3,categoricalCrossentropy:Mh,sparseCategoricalCrossentropy:nm,binaryCrossentropy:sm,kullbackLeiblerDivergence:_w,poisson:f3,cosineProximity:im};function Ww(e){if(typeof e=="string"){if(e in rm)return rm[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new q(t)}else return e}function $w(e,t){return Q(()=>{const n=X(.5,Fn(t)),s=Wh(xs(t,n),e.dtype);return qt(Xs(e,s),-1)})}function Uw(e,t){return Q(()=>Wh(Xs(rh(e,-1),rh(t,-1)),"float32"))}function kv(e,t){return Q(()=>Us(e.equal(1),t.equal(1)).sum().cast("float32"))}function g3(e,t){return Q(()=>Us(e.equal(1),t.equal(0)).sum().cast("float32"))}function y3(e,t){return Q(()=>Us(e.equal(0),t.equal(1)).sum().cast("float32"))}function Fv(e,t){return Q(()=>{const n=kv(e,t),s=y3(e,t),i=n.add(s);return Bn(xs(i,0),n.div(i),0).cast("float32")})}function b3(e,t){return Q(()=>{const n=kv(e,t),s=g3(e,t),i=n.add(s);return Bn(xs(i,0),n.div(i),0).cast("float32")})}function _v(e,t){return sm(e,t)}function Wv(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),t=t.argMax(-1),t.dtype!==e.dtype&&(t=t.asType(e.dtype)),Xs(e,t).asType("float32")}function mte(e,t){throw new Pe}function fte(e,t){throw new Pe}const w3=ir,L3=ir,S3=ic,I3=ic,x3=qr,T3=qr,Bw=Mh,A3=im,$v=nm,om={binaryAccuracy:$w,categoricalAccuracy:Uw,precision:Fv,categoricalCrossentropy:Bw,sparseCategoricalCrossentropy:$v,mse:w3,MSE:L3,mae:S3,MAE:I3,mape:x3,MAPE:T3,cosine:A3};function v3(e){if(typeof e=="string"&&e in om)return om[e];if(typeof e!="string"&&e!=null)return e;throw new q(`Unknown metric ${e}`)}function am(e){if(As(e!==null,`Unknown LossOrMetricFn ${e}`),typeof e=="string")return e;{let t;for(const n of Object.keys(rm))if(rm[n]===e){t=n;break}if(t!==void 0)return t;for(const n of Object.keys(om))if(om[n]===e){t=n;break}return t!==void 0?t:e.name}}function N3(e){const t={Adagrad:()=>Wo.adagrad(.01),Adadelta:()=>Wo.adadelta(1,.95,mn()),Adam:()=>Wo.adam(.001,.9,.999,mn()),Adamax:()=>Wo.adamax(.002,.9,.999,mn(),0),RMSProp:()=>Wo.rmsprop(.001,.9,0,mn()),SGD:()=>Wo.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new q(`Unknown Optimizer ${e}`)}const Uv=1*1024*1024;function Bv(e,t,n=!1){if(e==null||typeof e!="object"||Object.getPrototypeOf(e)!==Object.prototype||!Mw(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const s=JSON.stringify(e);s.length>Uv&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${Uv}.`)}}function Mw(e){if(e===null)return!0;if(typeof e=="object")if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if(typeof n!="string")return!1;if(!Mw(e[n]))return!1}return!0}else if(Array.isArray(e)){for(const t of e)if(!Mw(t))return!1;return!0}else return!1;else{const t=typeof e;return t==="string"||t==="number"||t==="boolean"}}function C3(e,t,n,s=console.log){const i=O3(e),o=["Layer (type)","Output shape","Param #"];i?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map(m=>Math.floor(t*m)));let a;if(!i){o.push("Receives inputs"),a=[];for(const m in e.nodesByDepth)a.push(...e.nodesByDepth[m])}s("_".repeat(t)),cm(o,n,s),s("=".repeat(t));const c=e.layers;for(let m=0;m1||i.length===1&&i[0].inboundLayers.length>1){t=!1;break}s.push(...i)}if(t)for(const i of e.layers){let o=!1;for(const a of i.inboundNodes)if(s.indexOf(a)!==-1)if(o){t=!1;break}else o=!0;if(!t)break}return t}function cm(e,t,n=console.log){let s="";for(let i=0;i0&&(s=s.slice(0,s.length-1)+" "),s+=e[i],s=s.slice(0,t[i]),s+=" ".repeat(t[i]-s.length);n(s)}function E3(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(c){s="multiple"}const i=e.name,o=e.getClassName(),a=[`${i} (${o})`,s,e.countParams().toString()];cm(a,t,n)}function D3(e,t,n,s){let i;try{i=JSON.stringify(e.outputShape)}catch(m){i="multiple"}const o=[];for(const m of e.inboundNodes){if(n!=null&&n.length>0&&n.indexOf(m)===-1)continue;for(let f=0;fL.name),h=[],d=t.names();for(const L of c)d.indexOf(L)!==-1?h.push(t.getValue(L)):h.push(null);s!=null&&(s.maxNumTensors=-Infinity,s.minNumTensors=Infinity);const m=c.join(",")+"|"+t.names().join(",");let f,b;if(zw[m]==null){const L=F3(a,t);f=L.sorted,b=L.recipientCounts,zw[m]=f,Pv[m]=b}f=zw[m],b={},i||Object.assign(b,Pv[m]);const w=new Po(t);for(let L=0;Ls.maxNumTensors&&(s.maxNumTensors=j),j0,()=>"Expected at least one fetch, got none");let n=[],s={};if(e.length===1){const i=zv(e[0],t);n=i.sorted,s=i.recipientMap}else{const i=new Set;for(const o of e){const{sorted:a,recipientMap:c}=zv(o,t);for(const h of a)i.has(h.name)||(n.push(h),i.add(h.name));for(const h in c)s[h]==null&&(s[h]=new Set),c[h].forEach(d=>s[h].add(d))}}return{sorted:n,recipientCounts:_3(s)}}function _3(e){const t={};for(const n in e)t[n]=e[n].size;return t}function zv(e,t){const n=new Set,s=[],i={};for(const c of t.names())n.add(c);const o=[],a=[];for(o.push(e);o.length>0;){const c=o[o.length-1];if(n.has(c.name)){o.pop();continue}const h=a[a.length-1]===o.length-1;if(c.inputs.length===0||h)o.pop(),s.push(c),n.add(c.name),h&&a.pop();else{a.push(o.length-1);for(const d of c.inputs){if(i[d.name]==null&&(i[d.name]=new Set),i[d.name].add(c.name),n.has(d.name))continue;o.push(d)}}}return{sorted:s,recipientMap:i}}function W3(e){let t;if(e.sourceLayer.inboundNodes.length===1)t=e.sourceLayer.output;else{let n=null;for(let s=0;sN.name)}`);Vr(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(N=>N.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const N of this.outputs){const O=N.sourceLayer,E=N.nodeIndex,k=N.tensorIndex;this.outputLayers.push(O),this.outputLayersNodeIndices.push(E),this.outputLayersTensorIndices.push(k)}for(const N of this.inputs){const O=N.sourceLayer,E=N.nodeIndex,k=N.tensorIndex;As(E===0,"input layer has >1 nodes"),As(k===0,"input layer has >1 tensors"),this.inputLayers.push(O),this.inputLayersNodeIndices.push(E),this.inputLayersTensorIndices.push(k)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let N=0;NN.shape),this.internalOutputShapes=this.outputs.map(N=>N.shape);const t={},n={},s={},i={},o={},a=[],c=(N,O,E,k,F,U)=>{(k==null||F==null||U==null)&&(k=N.sourceLayer,F=N.nodeIndex,U=N.tensorIndex);const $=k.inboundNodes[F];if(E.indexOf($)!==-1)throw new ti(`The tensor ${N.name} at layer "${k.name}" is part of a cycle.`);if(O.indexOf($)!==-1)return;this.containerNodes.add(Oi.nodeKey(k,F)),k.id in o||(o[k.id]=Object.keys(o).length),E.indexOf($)===-1&&E.push($);const Y=$.inboundLayers.length;for(let j=0;j=0;)E.splice(E.indexOf($),1);a.push($)},h=[],d=[];for(const N of this.outputs)c(N,h,d);const m=a.slice().reverse();for(const N of m){n[N.id]=N,N.id in t||(t[N.id]=0);let O=t[N.id];const E=s[N.outboundLayer.id]==null?0:s[N.outboundLayer.id];O=Math.max(O,E),s[N.outboundLayer.id]=O,i[N.outboundLayer.id]=N.outboundLayer,t[N.id]=O;for(let k=0;kparseInt(N,10)).sort(Bp);this.layers=[];for(const N of w){const O=b[N];O.sort((E,k)=>{const F=o[E.id],U=o[k.id];return FU?1:0});for(const E of O)E instanceof Oi&&this.internalContainerRefs.push(E),this.layers.push(E)}this.layersByDepth=b,w=Object.keys(f).map(N=>parseInt(N,10)).sort(Bp);const L=this.inputs.slice(),x=[];for(const N of w)for(const O of f[N]){const E=O.outboundLayer;if(E!=null){for(const k of O.inputTensors)if(L.indexOf(k)===-1)throw new ti(`Graph disconnected: cannot obtain value for tensor ${k} at layer "${E.name}". The following previous layers were accessed without issue: ${x}`);for(const k of O.outputTensors)L.push(k);x.push(E.name)}}this.nodesByDepth=f;const v=this.layers.map(N=>N.name);for(const N of v){const O=v.filter(E=>E===N).length;if(O!==1)throw new ti(`The name "${N}" is used ${O} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(v))}this.outboundNodes=[],this.inboundNodes=[],new em({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(N=>null),outputMasks:this.outputs.map(N=>null),inputShapes:this.inputs.map(N=>N.shape),outputShapes:this.outputs.map(N=>N.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach(t=>{t._trainableWeights.forEach(n=>n.trainable=e)}),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new q("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const n of this.layers)t.push(...n.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const o of this.layers)for(const a of o.weights){if(n[a.originalName]!=null)throw new q(`Duplicate weight name: ${a.originalName}`);n[a.originalName]=a,s++}const i=[];for(const o in e){let a=o;if(n[o]==null){const c=o.split("/"),h=c.slice(0,-2).concat([c[c.length-1]]);a=h.join("/")}if(n[a]!=null)i.push([n[a],e[o]]);else if(t)throw new q(`Provided weight data has no target variable: ${o}`);delete n[a]}if(t){const o=[];for(const a in n)o.push(a);if(o.length>0)throw new q(`${o.length} of ${s} weights are not set: ${o}`)}kw(i)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion=`tfjs-layers ${lm}`,t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=Pw(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return Q(()=>{e=Et(e);const n=new Po;for(let s=0;s{e=Et(e);let n;return t==null?n=$o(null,e.length):n=Et(t),this.runInternalGraph(e,n)[1]})}computeOutputShape(e){const t=Zp(e);if(t.length!==this.inputLayers.length)throw new q(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let a=0;aparseInt(a,10)).sort(Bp);if(s.length>1)for(const a of s){const c=this.nodesByDepth[a];for(const h of c){const d=h.outboundLayer;if(this.inputLayers.map(L=>L.id).indexOf(d.id)!==-1)continue;const m=[];for(let L=0;LparseInt(c,10)).sort(Bp);for(const c of s){const h=this.nodesByDepth[c];for(const d of h){const m=d.outboundLayer,f=d.inputTensors,b=d.outputTensors,w=new Array;for(const L of f)L.id in n&&w.push(n[L.id]);if(w.length===f.length){let L={},x,v,N,O;if(d.callArgs!=null&&(L=d.callArgs),w.length===1){const[E,k]=w[0];L.mask==null&&(L.mask=k),N=Et(m.call(E,L)),O=Et(m.computeMask(E,k)),x=[E],v=[k]}else 
x=w.map(E=>E[0]),v=w.map(E=>E[1]),L.mask==null&&(L.mask=v),N=Et(m.call(x,L)),O=Et(m.computeMask(x,v));if(m.activityRegularizer)throw new Pe("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let E=0;E{const e=[];for(const t of this.layers)for(let n=0;n0){const L=[];for(let x=0;x0&&x.apply(ts(N),O)}function h(x){const v=x.name,N=ri(x,t.customObjects!=null?t.customObjects:{});N.setFastWeightInitDuringBuild(s),i[v]=N;const O=x.inboundNodes;O.forEach(E=>{if(!(E instanceof Array))throw new q(`Corrupted configuration, expected array for nodeData: ${E}`);a(N,E)})}const d=t.name,m=t.layers;for(const x of m)h(x);for(;!pz(o);)for(const x of m){const v=i[x.name];if(v.name in o){const N=o[v.name];delete o[v.name];for(const O of N)c(v,O)}}const f=[],b=[],w=t.inputLayers;for(const x of w){const v=x[0],N=x[1],O=x[2];As(v in i);const E=i[v],k=E.inboundNodes[N].outputTensors;f.push(k[O])}const L=t.outputLayers;for(const x of L){const v=x[0],N=x[1],O=x[2];As(v in i);const E=i[v],k=E.inboundNodes[N].outputTensors;b.push(k[O])}return new e({inputs:f,outputs:b,name:d})}get stateful(){if(this._stateful)throw new q("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){Q(()=>{this.layers.forEach(e=>{e.stateful&&e.resetStates()})})}}function Vv(e,t,n){const s=t.length;if(e==null||Array.isArray(e)&&e.length===0)return t.map(i=>null);if(s===1)return Array.isArray(e)&&e.length===1?e:typeof e=="object"&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}else if(typeof e=="object"&&Object.keys(e).length>0&&typeof e[Object.keys(e)[0]]=="object"){const i=[];return t.forEach(o=>{o in e?i.push(e[o]):i.push(null)}),i}else throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. Provided ${n} not understood: ${JSON.stringify(e)}`)}function Gv(e,t){return Vv(e,t,"classWeight")}function gte(e,t){return Vv(e,t,"sampleWeight")}async function Yv(e,t,n,s){if(t!=null||s!=null)throw new Error("Support sampleWeight is not implemented yet");if(n!=null){const i=Q(()=>{if(e.shape.length===1)return e.clone();if(e.shape.length===2)if(e.shape[1]>1){const c=1;return e.argMax(c)}else{if(e.shape[1]===1)return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),o=Array.from(await i.data());He(i);const a=[];return o.forEach(c=>{if(n[c]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${c} exists in the data but not in classWeight`);a.push(n[c])}),ls(a,"float32")}else return null}function $3(e,t){return X(e,t)}const U3=32;function Hv(e,t){let n,s;const i=t;n=i.xs,s=i.ys,A(n!=null&&s!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. 
The provided Dataset instead generates ${t}`);const o=qv("input",e.inputNames,n),a=qv("output",e.outputNames,s),c=o[0].shape[0];A(o.length===e.inputs.length,()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${o.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`),A(a.length===e.outputs.length,()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${a.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`);for(let h=0;h`Batch size mismatch: input ${e.inputNames[h]} has ${o[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);for(let h=0;h`Batch size mismatch: output ${e.outputNames[h]} has ${a[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);return{xs:o,ys:a}}function qv(e,t,n){if(n instanceof ee)return[n];if(Array.isArray(n))return A(n.length===t.length,()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`),n;{const s=[];for(const i of t){if(n[i]==null)throw new q(`The feature data generated by the dataset lacks the required ${e} key '${i}'.`);s.push(n[i])}return s}}function B3(e){if(e.length===3)throw new Pe("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}async function M3(e,t,n){const s=n.batchesPerEpoch!=null;if(A(e.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),A(n!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),A(n.epochs!=null&&n.epochs>0&&Number.isInteger(n.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`),A(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`),A(n.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const i=n.validationData!=null;let o,a;if(i)if(jv(n.validationData))A(n.validationBatches==null||n.validationBatches>0&&Number.isInteger(n.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`);else{const v=B3(n.validationData);o=v.xs,a=v.ys}const c=e.makeTrainFunction(),h=e.getDedupedMetricsNames();let d;i?d=h.slice().concat(h.map(v=>"val_"+v)):d=h.slice();const m=Ev(n.callbacks,n.yieldEvery),f=n.verbose==null?1:n.verbose,{callbackList:b,history:w}=Dv(m,f,n.epochs,null,null,P3(t,n),null,i,d);b.setModel(e),e.history=w,await b.onTrainBegin(),e.stopTraining_=!1;let L=n.initialEpoch==null?0:n.initialEpoch,x=await t.iterator();for(;L=n.batchesPerEpoch:E.done){if(i){let k;jv(n.validationData)?k=Et(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):k=Et(e.evaluate(o,a,{batchSize:n.validationBatchSize==null?U3:n.validationBatchSize,verbose:0}));for(let F=0;F0)throw new Pe("Verbose mode is not implemented yet.");A(!s||n.batches>0&&Number.isInteger(n.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`);const a=z3(t)?t:await t.iterator();let c=0,h=0;for(;s?h{if(d.value){const{xs:m,ys:f}=Hv(e,d.value),b=m.concat(f),w=Q(()=>i(b));if(He(b),h===0)for(let x=0;xbe(o[x],X(L,v))),h>0&&He(N)}He(w),c+=L,++h}return o}),d.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let d=0;d0&&Number.isInteger(e),()=>`batchSize is required to be a positive integer, but got ${e}`)}function Vh(e,t,n){return e==null?[null]:Array.isArray(e)?e.map(s=>Mo(s,t,n-t)):Mo(e,t,n-t)}function Gw(e,t){return Q(()=>e==null?null:Array.isArray(e)?e.map(n=>Gw(n,t)):bv(e,t.dtype==="int32"?t:t.toInt()))}function Yw(e,t){const n=[];let s=0,i=null;for(;s=e&&(i=e),n.push([s,i]),s=i;return n}async function G3(e,t,n,s,i,o,a,c,h,d,m,f,b,w,L){i==null&&(i=32),o==null&&(o=1),m==null&&(m=!0),b==null&&(b=0);let x=!1;if(h!=null&&d!=null&&(x=!0),L!=null&&(x=!0,w==null))throw new q("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const v=e.checkNumSamples(n,i,w,"steps_per_epoch");let N;v!=null&&(N=ni(0,v)),a==null&&(a=1);const{callbackList:O,history:E}=Dv(c,a,o,b,v,w,i,x,f);O.setModel(e),e.history=E,await O.onTrainBegin(),e.stopTraining_=!1;for(let k=b;k{const Z=$[Y][0],ie=$[Y][1],de=Mo(U,Z,ie-Z);j.batch=Y,j.size=ie-Z;const he=Gw(n,de),ue=t(he);for(let me=0;me0){if(L=!0,s.validationData.length===2)a=s.validationData[0],c=s.validationData[1];else throw s.validationData.length===3?new Pe("validationData including sample weights is not supported yet."):new q(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);const $=!0,Y=await e.standardizeUserData(a,c,null,null,$,f);h=Y[0],d=Y[1],x=h.concat(d)}else if(s.validationSplit!=null&&s.validationSplit>0&&s.validationSplit<1){L=!0;const $=Math.floor(i[0].shape[0]*(1-s.validationSplit)),Y=i[0].shape[0];h=Vh(i,$,Y),i=Vh(i,0,$),d=Vh(o,$,Y),o=Vh(o,0,$),x=h.concat(d)}else s.validationSteps!=null&&(L=!0);const v=i.concat(o).concat(m);e.checkTrainableWeightsConsistency();const N=e.makeTrainFunction(),O=e.getDedupedMetricsNames();let E,k;L?(e.makeTestFunction(),E=e.testFunction,k=O.slice().concat(O.map($=>"val_"+$))):(E=null,x=[],k=O.slice());const F=Ev(s.callbacks,s.yieldEvery),U=await G3(e,N,v,O,f,s.epochs,s.verbose,F,E,x,s.shuffle,k,s.initialEpoch,null,null);return U}finally{e.isTraining=!1,zo(i,t),zo(o,n),zo(h,a),zo(d,c),m!=null&&He(m)}}function Kv(e){const t=[];e instanceof ee&&(e=[e]);for(let n=0;nn.push(i.id));else if(t!=null)for(const i in t){const o=t[i];n.push(o.id)}const s=[];if(e instanceof ee)n.indexOf(e.id)===-1&&s.push(e);else if(Array.isArray(e))e.forEach(i=>{n.indexOf(i.id)===-1&&s.push(i)});else if(e!=null)for(const i in e){const o=e[i];n.indexOf(o.id)===-1&&s.push(o)}s.forEach(i=>{i.isDisposed||i.dispose()})}function H3(e){return e instanceof ee}function Hw(e){return Array.isArray(e)}function Xv(e){return!H3(e)&&!Hw(e)}function Jv(e,t,n,s=!0,i=""){if(t==null||t.length===0){if(e!=null){let a=!1;if(Hw(e)&&e.length>0)a=!0;else if(Xv(e)){for(const c in e)if(e.hasOwnProperty(c)){a=!0;break}}else a=!0;if(a)throw new q(`Error when checking model ${i} expected no data, but got ${e}`)}return[]}if(e==null)return t.map(a=>null);let o;if(Xv(e)){e=e,o=[];for(const a of t){if(e[a]==null)throw new q(`No data provided for "${a}". Need data for each key in: ${t}`);o.push(e[a])}}else if(Hw(e)){if(e=e,e.length!==t.length)throw new q(`Error when checking model ${i}: the Array of Tensors that you are passing to your model is not the size the model expected. 
Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);o=e}else{if(e=e,t.length>1)throw new q(`The model ${i} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);o=[e]}if(o=Kv(o),n!=null)for(let a=0;a=0&&d!==m)throw new q(`Error when checking ${i}: expected ${t[a]} to have shape [${n[a]}], but got array with shape [${c.shape}].`)}}return o}function q3(e,t,n){const s=Vr(e.map(o=>o.shape[0]));s.sort();const i=Vr(t.map(o=>o.shape[0]));if(i.sort(),s.length>1)throw new q(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map(o=>o.shape))}`);if(i.length>1)throw new q(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(o=>o.shape))}`);if(s.length>0&&i.length>0&&!ae(s,i))throw new q(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${i[0]} target sample(s).`)}function j3(e,t,n){const s=[ir,sm,Mh];for(let i=0;i1)throw new q(`The model expects ${t.length} ${i} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);o=[e]}if(n!=null)for(let a=0;a[]);let n;if(typeof e=="string"||typeof e=="function")n=[e];else if(Array.isArray(e)||typeof e=="object")n=e;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);if(Array.isArray(n))return t.map(s=>n);{const s=[];for(const i of t){let o=n.hasOwnProperty(i)?n[i]:[];Array.isArray(o)||(o=[o]),s.push(o)}return s}}const X3="layers-model";class rr extends Oi{constructor(e){super(e);this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new q("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");C3(this,e,t,n)}compile(e){if(e.loss==null&&(e.loss=[]),this.loss=e.loss,typeof e.optimizer=="string")this.optimizer_=N3(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof er))throw new q("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(!Array.isArray(e.loss)&&typeof e.loss!="string"&&typeof e.loss!="function"){e.loss=e.loss;for(const o in e.loss)if(this.outputNames.indexOf(o)===-1)throw new q(`Unknown entry in loss dictionary: "${o}". Only expected the following keys: ${this.outputNames}`);for(const o of this.outputNames)e.loss[o]==null&&console.warn(`Output "${o}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${o} during training`),t.push(Ww(e.loss[o]))}else if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new q(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const o=e.loss;t=o.map(a=>Ww(a))}else{const o=Ww(e.loss);this.outputs.forEach(a=>{t.push(o)})}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let o=0;o{for(let o=0;o1&&(this.metricsTensors.push([a,o]),this.metricsNames.push(this.outputNames[o]+"_loss"))}});const s=K3(e.metrics,this.outputNames),i=(o,a,c)=>{this.outputNames.length>1&&(a=this.outputNames[o]+"_"+a),this.metricsNames.push(a),this.metricsTensors.push([c,o])};Bo("metric",()=>{for(let o=0;o{const d="";let m,f,b;for(const w of h){if(typeof w=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(w)!==-1){const x=this.internalOutputShapes[o];x[x.length-1]===1||this.lossFunctions[o]===sm?["accuracy","acc"].indexOf(w)!==-1?f=$w:["crossentropy","ce"].indexOf(w)!==-1&&(f=_v):this.lossFunctions[o]===nm?["accuracy","acc"].indexOf(w)!==-1?f=Wv:["crossentropy","ce"].indexOf(w)!==-1&&(f=$v):["accuracy","acc"].indexOf(w)!==-1?f=Uw:["crossentropy","ce"].indexOf(w)!==-1&&(f=Bw);let v;["accuracy","acc"].indexOf(w)!==-1?v="acc":["crossentropy","ce"].indexOf(w)!==-1&&(v="ce"),b=f,m=d+v}else{const x=v3(w);b=x,m=d+am(w)}let L;Bo(m,()=>{L=b}),i(o,m,L)}};c(a)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=n.batchSize==null?32:n.batchSize;Vw(s);const i=!0,o=this.standardizeUserDataXY(e,t,i,s);try{const a=o[0].concat(o[1]);this.makeTestFunction();const c=this.testFunction,h=this.testLoop(c,a,s,n.verbose,n.steps);return ts(h)}finally{zo(o[0],e),zo(o[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),V3(this,e,t)}checkNumSamples(e,t,n,s="steps"){let i;if(n!=null){if(i=null,t!=null)throw new q(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else if(e!=null)Array.isArray(e)?i=e[0].shape[0]:i=e.shape[0];else throw new q(`Either the input data should have a defined shape, or ${s} shoud be specified.`);return i}execute(e,t){if(Array.isArray(t)&&t.length===0)throw new q("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],i=this.retrieveSymbolicTensors(s),o=new Po;if(e instanceof ee&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new q(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let c=0;ca.name);for(let a=0;a0){const s=[];throw t.forEach((i,o)=>{i==null&&s.push(e[o])}),new q(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return t}predictLoop(e,t=32,n=!1){return Q(()=>{const s=this.checkNumSamples(e);if(n)throw new Pe("Verbose predictLoop() is not implemented yet.");const i=Yw(s,t),o=this.outputs.map(a=>[]);for(let a=0;a{const h=i[a][0],d=i[a][1],m=Vh(e,h,d),f=[];if(Array.isArray(m))for(let w=0;wo[d].push(h))}return ts(o.map(a=>Yt(a,0)))})}predict(e,t={}){const n=Kv(e);Zv(n,this.inputNames,this.feedInputShapes,!1);try{const s=t.batchSize==null?32:t.batchSize;return Vw(s),this.predictLoop(n,s)}finally{zo(n,e)}}predictOnBatch(e){Zv(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return 
this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(this.optimizer_==null)throw new ti("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const i=[];for(let o=0;o0&&e[0].shape[0]%s!==0)throw new q(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,i=!0,o){const[a,c]=this.standardizeUserDataXY(e,t,i,o);if(n!=null)throw new Error("sample weight is not supported yet.");let h=null;if(s!=null){const d=Gv(s,this.outputNames);h=[];for(let m=0;m{const o=this.checkNumSamples(t,n,i,"steps"),a=[];if(s>0)throw new Pe("Verbose mode is not implemented yet.");if(i!=null)throw new Pe("steps mode in testLoop() is not implemented yet");{const c=Yw(o,n),h=ls(ni(0,o));for(let d=0;d1){const o=ov(e.slice(0,n),s);i+=`_${o}`}t.push(i)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),o=[],a=()=>{const m=[];for(let L=0;L1&&L{w=be(w,L)}),w},c=this.collectedTrainableWeights.map(m=>m.read()),h=!0,d=this.optimizer_.minimize(a,h,c);return[d].concat(o)}}makeTestFunction(){this.testFunction=e=>Q(()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),i=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),o=[];for(let h=0;hsr(t))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t)if(typeof n[s]=="string")e[s]=sr(n[s]);else throw new Error("Serialization of non-string loss is not supported.")}return e}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[sr(am(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(e=>sr(am(e)));{const e={};for(const t in this.metrics)e[t]=sr(am(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(e.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(e.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(e.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");const t=Ph(e.optimizer_config),n=ri(t);let s;if(typeof e.loss=="string")s=Uo(e.loss);else if(Array.isArray(e.loss))s=e.loss.map(o=>Uo(o));else if(e.loss!=null){s={};for(const o in e.loss)s[o]=Uo(e.loss[o])}let i;if(Array.isArray(e.metrics))i=e.metrics.map(o=>Uo(o));else if(e.metrics!=null){i={};for(const o in e.metrics)i[o]=Uo(e.metrics[o])}this.compile({loss:s,metrics:i,optimizer:n})}async save(e,t){if(typeof e=="string"){const h=zy(e);if(h.length===0)throw new q(`Cannot find any save handlers for URL '${e}'`);if(h.length>1)throw new q(`Found more than one (${h.length}) save handlers for URL '${e}'`);e=h[0]}if(e.save==null)throw new q("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await My(this.getNamedWeights(t)),s=!1,i=null,o=this.toJSON(i,s),a={modelTopology:o,format:X3,generatedBy:`TensorFlow.js tfjs-layers v${lm}`,convertedBy:null},c=t==null?!1:t.includeOptimizer;if(c&&this.optimizer!=null){a.trainingConfig=this.getTrainingConfig();const h="optimizer",{data:d,specs:m}=await 
My(await this.optimizer.getWeights(),h);n.specs.push(...m),n.data=zd([n.data,d])}if(this.userDefinedMetadata!=null){const h=!0;Bv(this.userDefinedMetadata,this.name,h),a.userDefinedMetadata=this.userDefinedMetadata}return a.weightData=n.data,a.weightSpecs=n.specs,e.save(a)}setUserDefinedMetadata(e){Bv(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}rr.className="Model",fe(rr);class Qv extends rr{}Qv.className="Functional",fe(Qv);async function J3(e,t){"modelTopology"in e||(e={modelTopology:e}),e=e;let n=e.modelTopology;n.model_config!=null&&(n=n.model_config);const s=Ph(n),i=ri(s,t);if(e.weightsManifest!=null){const o=await vT(e.weightsManifest,e.pathPrefix,i.weights.map(c=>c.originalName)),a={};for(const c of i.weights)a[c.originalName]=o[c.originalName];i.loadWeights(a),He(o)}return i}async function Z3(e,t){if(t==null&&(t={}),typeof e=="string"){const n=Vy(e,t);if(n.length===0)n.push(Yd(e,t));else if(n.length>1)throw new q(`Found more than one (${n.length}) load handlers for URL '${e}'`);e=n[0]}return Q3(e,void 0,t)}async function Q3(e,t,n){if(n==null&&(n={}),e.load==null)throw new q("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const s=await e.load();let i=s.modelTopology;i.model_config!=null&&(i=i.model_config);const o=n.strict==null?!0:n.strict,a=s.weightData!=null&&s.weightSpecs!=null&&o,c=ri(Ph(i),t,a),h=s.trainingConfig;if(h!=null&&c.loadTrainingConfig(h),s.userDefinedMetadata!=null&&c.setUserDefinedMetadata(s.userDefinedMetadata),s.weightData!=null){if(s.weightSpecs==null)throw new q("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:d,optimizerWeights:m}=eV(s.weightData,s.weightSpecs);c.loadWeights(d,o),c.optimizer!=null&&m.length>0&&await c.optimizer.setWeights(m),He(d),He(m.map(f=>f.tensor))}return c}function eV(e,t){const n=Pd(e,t),s={},i=[];return t.forEach(o=>{o.group==="optimizer"?i.push({name:o.name,tensor:n[o.name]}):s[o.name]=n[o.name]}),{modelWeights:s,optimizerWeights:i}}class rc extends rr{constructor(e){super({inputs:[],outputs:[]});if(e=e||{},this.trainable=!0,this.built=!1,this.name=e.name!=null?e.name:Jp("sequential_"),e.layers!=null)for(const t of e.layers)this.add(t)}checkShape(e){const t=e.inboundNodes[0].outputTensors[0].shape;if(t.some(n=>n<0))throw new q(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof rc||e instanceof rr;let n;if(t){if(n=e,n.outputs.length!==1)throw new q("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(n.inputs.length!==1)throw new q("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(e.inboundNodes.length===0){if(e.batchInputShape==null)throw new q("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const s=Av({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(s)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(e.inboundNodes.length!==1)throw new q(`A layer added to a Sequential model must not already be connected somewhere else. 
LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(e.inboundNodes[0].outputTensors.length!==1)throw new q("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=Tv(this.outputs[0])}this.inboundNodes=[],new em({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:$o(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(s=>s.shape),outputShapes:this.outputs[0].shape})}else{const s=e.apply(this.outputs[0]);if(Array.isArray(s))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[s],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return this.model==null&&this.build(),this.model.call(e,t)}build(e){if(Nt(e),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new rr({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){this.model==null&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return this.model==null&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return this.model==null&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async 
fitDataset(e,t){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let i,o={};if(t instanceof Array){if(!(t[0].className!=null)||t[0].className==="Merge")throw new q("Legacy serialization format not supported yet.");i=t}else A(t.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),i=t.layers,delete t.layers,o=t;const a=new e(o);if(!(a instanceof rc))throw new Pe(`Sequential.fromConfig called on non-Sequential input: ${a}`);for(const c of i){const h=void 0,d=ri(c,h,s);s&&d.setFastWeightInitDuringBuild(!0),a.add(d)}return a}set stopTraining(e){if(this.model==null)throw new q("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(this.model==null)throw new q("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}rc.className="Sequential",fe(rc);function tV(e){return new rr(e)}function nV(e){return new rc(e)}function sV(e,t){return t==null&&(t={}),Z3(e,t)}function eN(e){return Av(e)}function iV(e,t){Ps.registerCallbackConstructor(e,t)}class us extends Ao{getConfig(){return{}}}class tN extends us{apply(e,t=1){return Dz(e,t)}}tN.className="elu",fe(tN);class nN extends us{apply(e){return bp(e)}}nN.className="selu",fe(nN);class sN extends us{apply(e){return Ni(e)}}sN.className="relu",fe(sN);class iN extends us{apply(e){return Q(()=>Oo(6,Ni(e)))}}iN.className="relu6",fe(iN);class rN extends us{apply(e){return e}}rN.className="linear",fe(rN);class oN extends us{apply(e){return Ti(e)}}oN.className="sigmoid",fe(oN);class aN extends us{apply(e){return Fz(e)}}aN.className="hardSigmoid",fe(aN);class cN extends us{apply(e){return za(e)}}cN.className="softplus",fe(cN);class lN extends us{apply(e){return kz(e)}}lN.className="softsign",fe(lN);class hN extends us{apply(e){return $a(e)}}hN.className="tanh",fe(hN);class qw extends us{apply(e,t=-1){return Fo(e,t)}}qw.className="softmax",fe(qw);class uN extends us{apply(e,t=-1){return dp(e,t)}}uN.className="logSoftmax",fe(uN);class dN extends us{apply(e,t=1){return Q(()=>Ti(e.mul(t)).mul(e))}}dN.className="swish",fe(dN);function jr(e){return e.getClassName()}function jw(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"activation")}function Kr(e){if(e==null){const t={};return t.className="linear",t.config={},jw(t)}if(typeof e=="string"){const t={};return t.className=e,t.config={},jw(t)}else return e instanceof us?e:jw(e)}function Kw(e){if(e!=null&&typeof e!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}class pN extends Ao{}class Gh extends pN{constructor(e){super();Kw(e),this.l1=e==null||e.l1==null?.01:e.l1,this.l2=e==null||e.l2==null?.01:e.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(e){return Q(()=>{let t=dt([1]);return this.hasL1&&(t=be(t,$e(X(this.l1,dn(e))))),this.hasL2&&(t=be(t,$e(X(this.l2,Uh(e))))),t.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}Gh.className="L1L2",fe(Gh);function rV(e){return Kw(e),new Gh({l1:e!=null?e.l1:null,l2:0})}function oV(e){return Kw(e),new 
Gh({l2:e!=null?e.l2:null,l1:0})}const mN={l1l2:"L1L2"};function Ct(e){return dw(e)}function fN(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"regularizer")}function zt(e){if(e==null)return null;if(typeof e=="string"){const t=e in mN?mN[e]:e,n={className:t,config:{}};return fN(n)}else return e instanceof pN?e:fN(e)}class Xw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null&&(this.maxValue=e.maxValue)}call(e,t){e=Xe(e);let n=Ni(e);return this.maxValue!=null&&(n=Jn(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}Xw.className="ReLU",fe(Xw);class Jw extends lt{constructor(e){super(e==null?{}:e);this.DEFAULT_ALPHA=.3,e==null&&(e={}),this.alpha=e.alpha==null?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=Xe(e);return lp(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}Jw.className="LeakyReLU",fe(Jw);class Zw extends lt{constructor(e){super(e==null?{}:e);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",e==null&&(e={}),this.supportsMasking=!0,this.alphaInitializer=Pt(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=zt(e.alphaRegularizer),this.alphaConstraint=gn(e.alphaConstraint),e.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else if(typeof e.sharedAxes=="number")this.sharedAxes=[e.sharedAxes];else throw new q(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`)}build(e){e=Nt(e);const t=e.slice(1);if(this.sharedAxes!=null)for(const s of this.sharedAxes)t[s-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(this.sharedAxes!=null)for(let s=1;s(jt(t),t==="channelsFirst"?Ye(e,[0,2,3,1]):e))}function gN(e,t){return Q(()=>(jt(t),t==="channelsFirst"?Ye(e,[0,2,3,4,1]):e))}function yN(e,t,n,s=1,i="valid",o,a=1){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.shape.length!==3)throw new q(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(t.shape.length!==3)throw new q(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(n!=null&&n.shape.length!==1)throw new q(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(o==="channelsFirst"&&(e=Ye(e,[0,2,1])),i==="causal")throw new Pe("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let c=ip(e,t,s,i==="same"?"same":"valid","NWC",a);return n!=null&&(c=Ri(c,n)),c})}function yte(e,t,n=1,s="valid",i,o=1){return Q(()=>(jt(i),yN(e,t,null,n,s,i,o)))}function bte(e,t,n=[1,1],s="valid",i,o){return Q(()=>(jt(i),sL(e,t,null,n,s,i,o)))}function sL(e,t,n,s=[1,1],i="valid",o,a,c=null){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.rank!==3&&e.rank!==4)throw new q(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(t.rank!==3&&t.rank!==4)throw new q(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let h=nL(e,o);if(i==="causal")throw new Pe("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return h=Kb({x:h,filter:t,strides:s,pad:i==="same"?"same":"valid",dilations:a,dataFormat:"NHWC",bias:n,activation:c}),o==="channelsFirst"&&(h=Ye(h,[0,3,1,2])),h})}function wte(e,t,n=[1,1,1],s="valid",i,o){return 
Q(()=>(jt(i),bN(e,t,null,n,s,i,o)))}function bN(e,t,n,s=[1,1,1],i="valid",o,a){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.rank!==4&&e.rank!==5)throw new q(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(t.rank!==4&&t.rank!==5)throw new q(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let c=gN(e,o);if(i==="causal")throw new Pe("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return c=Lb(c,t,s,i==="same"?"same":"valid","NDHWC",a),n!=null&&(c=Ri(c,n)),o==="channelsFirst"&&(c=Ye(c,[0,4,1,2,3])),c})}class iL extends lt{constructor(e,t){super(t);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",iL.verifyArgs(t),this.rank=e,wn(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new Pe(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=oc(t.kernelSize,e,"kernelSize"),this.strides=oc(t.strides==null?1:t.strides,e,"strides"),this.padding=t.padding==null?"valid":t.padding,vs(this.padding),this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,jt(this.dataFormat),this.activation=Kr(t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.biasInitializer=Pt(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=gn(t.biasConstraint),this.biasRegularizer=zt(t.biasRegularizer),this.activityRegularizer=zt(t.activityRegularizer),this.dilationRate=oc(t.dilationRate==null?1:t.dilationRate,e,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new q(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new q(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new q(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(e){if(As("kernelSize"in e,"required key 'kernelSize' not in config"),typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,3))throw new q(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:jr(this.activation),useBias:this.useBias,biasInitializer:Kt(this.biasInitializer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),biasConstraint:fn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Yh extends iL{constructor(e,t){super(e,t);this.kernel=null,Yh.verifyArgs(t),this.filters=t.filters,wn(this.filters,"filters"),this.kernelInitializer=Pt(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=gn(t.kernelConstraint),this.kernelRegularizer=zt(t.kernelRegularizer)}build(e){e=Nt(e);const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new q(`The channel dimension of the input should be 
defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return Q(()=>{e=Xe(e);let n;const s=this.bias==null?null:this.bias.read(),i=cv(this.activation.getClassName());if(i!=null&&this.rank===2)n=sL(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate,i);else{if(this.rank===1)n=yN(e,this.kernel.read(),s,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)n=sL(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)n=bN(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new Pe("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(n=this.activation.apply(n))}return n})}computeOutputShape(e){e=Nt(e);const t=[],n=this.dataFormat==="channelsLast"?e.slice(1,e.length-1):e.slice(2);for(let i=0;i 0 but got ${JSON.stringify(e.filters)}`)}}class Hh extends Yh{constructor(e){super(2,e);Hh.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,2))throw new q(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Hh.className="Conv2D",fe(Hh);class um extends Yh{constructor(e){super(3,e);um.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!(Array.isArray(e.kernelSize)&&(e.kernelSize.length===1||e.kernelSize.length===3)))throw new q(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}um.className="Conv3D",fe(um);class rL extends Hh{constructor(e){super(e);if(this.inputSpec=[new Ln({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new q(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(e=Nt(e),e.length!==4)throw new q("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new q("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new Ln({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return Q(()=>{let n=Xe(e);if(n.shape.length!==4)throw new q(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);const s=n.shape,i=s[0];let o,a;this.dataFormat==="channelsFirst"?(o=2,a=3):(o=1,a=2);const c=s[o],h=s[a],d=this.kernelSize[0],m=this.kernelSize[1],f=this.strides[0],b=this.strides[1],w=hm(c,f,d,this.padding),L=hm(h,b,m,this.padding),x=[i,w,L,this.filters];this.dataFormat!=="channelsLast"&&(n=Ye(n,[0,2,3,1]));let v=rp(n,this.kernel.read(),x,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(v=Ye(v,[0,3,1,2])),this.bias!=null&&(v=Ri(v,this.bias.read(),this.dataFormat)),this.activation!=null&&(v=this.activation.apply(v)),v})}computeOutputShape(e){e=Nt(e);const t=e.slice();let n,s,i;this.dataFormat==="channelsFirst"?(n=1,s=2,i=3):(n=3,s=1,i=2);const o=this.kernelSize[0],a=this.kernelSize[1],c=this.strides[0],h=this.strides[1];return t[n]=this.filters,t[s]=hm(t[s],c,o,this.padding),t[i]=hm(t[i],h,a,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}rL.className="Conv2DTranspose",fe(rL);class wN extends Yh{constructor(e,t){super(e,t);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,t.filters==null)throw new q("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(t.kernelInitializer!=null||t.kernelRegularizer!=null||t.kernelConstraint!=null)throw new q("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(t.padding!=null&&t.padding!=="same"&&t.padding!=="valid")throw new q(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=Pt(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=zt(t.depthwiseRegularizer),this.depthwiseConstraint=gn(t.depthwiseConstraint),this.pointwiseInitializer=Pt(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=zt(t.pointwiseRegularizer),this.pointwiseConstraint=gn(t.pointwiseConstraint)}build(e){if(e=Nt(e),e.length{e=Xe(e);let n;if(this.rank===1)throw new Pe("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(e=Ye(e,[0,2,3,1])),n=Ub(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(n=Ri(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),this.dataFormat==="channelsFirst"&&(n=Ye(n,[0,3,1,2])),n})}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=Kt(this.depthwiseInitializer),e.pointwiseInitializer=Kt(this.pointwiseInitializer),e.depthwiseRegularizer=Ct(this.depthwiseRegularizer),e.pointwiseRegularizer=Ct(this.pointwiseRegularizer),e.depthwiseConstraint=fn(this.depthwiseConstraint),e.pointwiseConstraint=fn(this.pointwiseConstraint),e}}wN.className="SeparableConv";class oL extends wN{constructor(e){super(2,e)}}oL.className="SeparableConv2D",fe(oL);class dm extends Yh{constructor(e){super(1,e);dm.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,1))throw new q(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}dm.className="Conv1D",fe(dm);class aL extends lt{constructor(e){super(e);typeof e.cropping=="number"?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:typeof e.cropping[0]=="number"?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=e.dataFormat===void 0?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return this.dataFormat==="channelsFirst"?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return Q(()=>{if(e=Xe(e),this.dataFormat==="channelsLast"){const n=Pp(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return Pp(n,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{const n=Pp(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return Pp(n,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}aL.className="Cropping2D",fe(aL);class cL extends 
lt{constructor(e){super(e);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=e.size==null?this.DEFAULT_SIZE:e.size,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat}computeOutputShape(e){if(this.dataFormat==="channelsFirst"){const t=e[2]==null?null:this.size[0]*e[2],n=e[3]==null?null:this.size[1]*e[3];return[e[0],e[1],t,n]}else{const t=e[1]==null?null:this.size[0]*e[1],n=e[2]==null?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return Q(()=>{let n=Xe(e);const s=n.shape;if(this.dataFormat==="channelsFirst"){n=Ye(n,[0,2,3,1]);const i=this.size[0]*s[2],o=this.size[1]*s[3],a=n.resizeNearestNeighbor([i,o]);return Ye(a,[0,3,1,2])}else{const i=this.size[0]*s[1],o=this.size[1]*s[2];return n.resizeNearestNeighbor([i,o])}})}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}cL.className="UpSampling2D",fe(cL);function aV(e,t,n=[1,1],s="valid",i,o){return Q(()=>{i==null&&(i=ei()),jt(i);let a=nL(e,i);if(e.rank!==4)throw new q(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(t.rank!==4)throw new q(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return a=Co(a,t,n,s==="same"?"same":"valid","NHWC",o),i==="channelsFirst"&&(a=Ye(a,[0,3,1,2])),a})}class lL extends iL{constructor(e){super(2,e);this.depthwiseKernel=null,this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=Pt(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=gn(e.depthwiseConstraint),this.depthwiseRegularizer=zt(e.depthwiseRegularizer)}build(e){if(e=Nt(e),e.length<4)throw new q(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(e)}.`);const t=this.dataFormat==="channelsFirst"?1:3;if(e[t]==null||e[t]<0)throw new q(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{e=Xe(e);let n=aV(e,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(n=Ri(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),n})}computeOutputShape(e){e=Nt(e);const t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,i=oi(t,this.kernelSize[0],this.padding,this.strides[0]),o=oi(n,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[e[0],s,i,o]:[e[0],i,o,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=Kt(this.depthwiseInitializer),e.depthwiseRegularizer=Ct(this.depthwiseRegularizer),e.depthwiseConstraint=fn(this.depthwiseRegularizer),e}}lL.className="DepthwiseConv2D",fe(lL);function LN(e,t,n,s){if(Array.isArray(e)){if(t!=null||n!=null)throw new q("When inputs is an array, neither initialState or constants should be provided");s!=null&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function i(o){return 
o==null||Array.isArray(o)?o:[o]}return t=i(t),n=i(n),{inputs:e,initialState:t,constants:n}}function SN(e,t,n,s=!1,i,o,a=!1,c=!1){return Q(()=>{const h=t.shape.length;if(h<3)throw new q(`Input should be at least 3D, but is ${h}D.`);const d=[1,0].concat(ni(2,h));if(t=Ye(t,d),o!=null)throw new Pe("The rnn() functoin of the deeplearn.js backend does not support constants yet.");a&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),i!=null&&(i=i.asType("bool").asType("float32"),i.rank===h-1&&(i=Zn(i,-1)),i=Ye(i,d)),s&&(t=Ts(t,0),i!=null&&(i=Ts(i,0)));const m=[];let f,b=n;const w=t.shape[0],L=Qs(t);let x;i!=null&&(x=Qs(i));for(let N=0;Ne(O,b));if(i==null)f=E[0],b=E[1];else{const k=Q(()=>{const F=x[N],U=Fn(F).sub(F),$=E[0].mul(F).add(b[0].mul(U)),Y=b.map((j,Z)=>E[1][Z].mul(F).add(j.mul(U)));return{output:$,newStates:Y}});f=k.output,b=k.newStates}c&&m.push(f)}let v;if(c){const N=1;v=es(m,N)}return[f,v,b]})}class Ei extends lt{constructor(e){super(e);let t;if(e.cell==null)throw new q("cell property is missing for the constructor of RNN.");if(Array.isArray(e.cell)?t=new fm({cells:e.cell}):t=e.cell,t.stateSize==null)throw new q("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=t,this.returnSequences=e.returnSequences==null?!1:e.returnSequences,this.returnState=e.returnState==null?!1:e.returnState,this.goBackwards=e.goBackwards==null?!1:e.goBackwards,this._stateful=e.stateful==null?!1:e.stateful,this.unroll=e.unroll==null?!1:e.unroll,this.supportsMasking=!0,this.inputSpec=[new Ln({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return ni(0,e).map(t=>null)}else return this.states_}setStates(e){this.states_=e}computeOutputShape(e){Ew(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(this.returnSequences?s=[e[0],e[1],n]:s=[e[0],n],this.returnState){const i=[];for(const o of t)i.push([e[0],o]);return[s].concat(i)}else return s}computeMask(e,t){return Q(()=>{Array.isArray(t)&&(t=t[0]);const n=this.returnSequences?t:null;if(this.returnState){const s=this.states.map(i=>null);return[n].concat(s)}else return n})}get states(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;na.shape[a.shape.length-1]),o))throw new q(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=o.map(a=>new Ln({shape:[null,a]}));this.stateful&&this.resetStates()}resetStates(e,t=!1){Q(()=>{if(!this.stateful)throw new nr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(n==null)throw new q("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>dt([n,s])):this.states_=[dt([n,this.cell.stateSize])];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>dt([n,s])):this.states_[0]=dt([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new q(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t===!0?this.keptStates.push(this.states_.slice()):He(this.states_);for(let s=0;sbn(s.clone()))})}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=LN(e,n,s,this.numConstants);e=i.inputs,n=i.initialState,s=i.constants;let o=[],a=[];if(n!=null){t.initialState=n,o=o.concat(n),this.stateSpec=[];for(const h of n)this.stateSpec.push(new Ln({shape:h.shape}));a=a.concat(this.stateSpec)}s!=null&&(t.constants=s,o=o.concat(s),this.numConstants=s.length);const c=o[0]instanceof ii;if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const f=super.apply(h,t);return this.inputSpec=m,f}else return super.apply(e,t)}call(e,t){return Q(()=>{const n=t==null?null:t.mask,s=t==null?null:t.training;let i=t==null?null:t.initialState;e=Xe(e),i==null&&(this.stateful?i=this.states_:i=this.getInitialState(e));const o=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(i.length!==o)throw new q(`RNN Layer has ${o} state(s) but was passed ${i.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const a={training:s},c=(w,L)=>{const x=this.cell.call([w].concat(L),a);return[x[0],x.slice(1)]},h=SN(c,e,i,this.goBackwards,n,null,this.unroll,this.returnSequences),d=h[0],m=h[1],f=h[2];this.stateful&&this.resetStates(f,s);const b=this.returnSequences?m:d;return this.returnState?[b].concat(f):b})}getInitialState(e){return Q(()=>{let t=dt(e.shape);return t=$e(t,[1,2]),t=$h(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(n=>n>1?Iw(t,[1,n]):t):this.cell.stateSize>1?[Iw(t,[1,this.cell.stateSize])]:[t]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Ei.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=t.cell,i=ri(s,n);return new e(Object.assign(t,{cell:i}))}}Ei.className="RNN",fe(Ei);class ac extends lt{}class pm extends 
ac{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation==null?this.DEFAULT_ACTIVATION:e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=Nt(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{if(e=e,e.length!==2)throw new q(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=t.training==null?!1:t.training;0Fn(e),rate:this.dropout,training:s})),0Fn(n),rate:this.recurrentDropout,training:s}));let i;const o=this.dropoutMask,a=this.recurrentDropoutMask;o!=null?i=Ci(X(e,o),this.kernel.read()):i=Ci(e,this.kernel.read()),this.bias!=null&&(i=Ri(i,this.bias.read())),a!=null&&(n=X(n,a));let c=be(i,Ci(n,this.recurrentKernel.read()));return this.activation!=null&&(c=this.activation.apply(c)),[c,c]})}getConfig(){const e=super.getConfig(),t={units:this.units,activation:jr(this.activation),useBias:this.useBias,kernelInitializer:Kt(this.kernelInitializer),recurrentInitializer:Kt(this.recurrentInitializer),biasInitializer:Kt(this.biasInitializer),kernelRegularizer:Ct(this.kernelRegularizer),recurrentRegularizer:Ct(this.recurrentRegularizer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),kernelConstraint:fn(this.kernelConstraint),recurrentConstraint:fn(this.recurrentConstraint),biasConstraint:fn(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}pm.className="SimpleRNNCell",fe(pm);class hL extends Ei{constructor(e){e.cell=new pm(e),super(e)}call(e,t){return Q(()=>{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return new e(t)}}hL.className="SimpleRNN",fe(hL);class mm extends 
ac{constructor(e){super(e);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new q("GRUCell does not support reset_after parameter set to true.");this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Kr(e.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=Nt(e);const t=e[e.length-1];this.kernel=this.addWeight("kernel",[t,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{if(e=e,e.length!==2)throw new q(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training==null?!1:t.training;let s=e[1];e=e[0],0Fn(e),rate:this.dropout,training:n,count:3})),0Fn(s),rate:this.recurrentDropout,training:n,count:3}));const i=this.dropoutMask,o=this.recurrentDropoutMask;let a,c,h;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}uL.className="GRU",fe(uL);class qh extends ac{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Kr(e.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;e=Nt(e);const n=e[e.length-1];this.kernel=this.addWeight("kernel",[n,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let s;if(this.useBias){if(this.unitForgetBias){const i=this.biasInitializer,o=this.units;s=new(t=class extends Ms{apply(c,h){const d=i.apply([o]),m=new Vp().apply([o]),f=i.apply([o*2]);return yv(yv(d,m),f)}},t.className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,s,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(e,t){return Q(()=>{const n=t.training==null?!1:t.training;if(e=e,e.length!==3)throw new q(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const i=e[2];e=e[0],0Fn(e),rate:this.dropout,training:n,count:4})),0Fn(s),rate:this.recurrentDropout,training:n,count:4}));const o=this.dropoutMask,a=this.recurrentDropoutMask;let c,h,d,m;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}dL.className="LSTM",fe(dL);class fm extends ac{constructor(e){super(e);this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return Q(()=>{e=e;let n=e.slice(1);const s=[];for(const a of this.cells.slice().reverse())Array.isArray(a.stateSize)?s.push(n.splice(0,a.stateSize.length)):s.push(n.splice(0,1));s.reverse();const i=[];let o;for(let a=0;a{Bo(`RNNCell_${s}`,()=>{n.build(e),Array.isArray(n.stateSize)?t=n.stateSize[0]:t=n.stateSize,e=[e[0],t]})}),this.built=!0}getConfig(){const e=super.getConfig(),t=i=>({className:i.getClassName(),config:i.getConfig()}),n=this.cells.map(t),s={cells:n};return Object.assign({},e,s)}static fromConfig(e,t,n={}){const s=[];for(const i of t.cells)s.push(ri(i,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const 
t=[];for(const n of this.cells)t.push(...n.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return Dw(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,i=e.splice(s);for(let o=0;owv(t(),n),a=()=>Bh(o,t,s);if(!i||i<=1)return bn(a().clone());const c=Array(i).fill(void 0).map(a);return c.map(h=>bn(h.clone()))}var cV=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(e!=null&&typeof Object.getOwnPropertySymbols=="function")for(var i=0,s=Object.getOwnPropertySymbols(e);i{if(this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new q("ConvRNN2D cell does not support constants");const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return Q(()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=dt(i);return Array.isArray(t)?Array(t.length).fill(o):[o]})}resetStates(e,t=!1){Q(()=>{if(!this.stateful)throw new nr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=n[0];if(o==null)throw new q("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>dt(i)):this.states_=[dt(i)];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>dt(i)):this.states_[0]=dt(i);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new q(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):He(this.states_);for(let a=0;abn(a.clone()))})}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:i,strides:o,dilationRate:a}=this.cell,c=t==="channelsFirst",h=e[c?3:2],d=e[c?4:3],m=oi(h,s[0],i,o[0],a[0]),f=oi(d,s[1],i,o[1],a[1]),b=[...e.slice(0,2),...c?[n,m,f]:[m,f,n]];return b}}IN.className="ConvRNN2D";class gm extends qh{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:i,dataFormat:o,dilationRate:a}=e;super(Object.assign({},e,{units:t}));this.filters=t,wn(this.filters,"filters"),this.kernelSize=oc(n,2,"kernelSize"),this.kernelSize.forEach(c=>wn(c,"kernelSize")),this.strides=oc(s||1,2,"strides"),this.strides.forEach(c=>wn(c,"strides")),this.padding=i||"valid",vs(this.padding),this.dataFormat=o||"channelsLast",jt(this.dataFormat),this.dilationRate=oc(a||1,2,"dilationRate"),this.dilationRate.forEach(c=>wn(c,"dilationRate"))}build(e){var t;e=Nt(e);const n=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[n]==null)throw new q(`The channel dimension of the input should be defined. Found ${e[n]}`);const s=e[n],i=4,o=this.kernelSize.concat([s,this.filters*i]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const a=this.kernelSize.concat([this.filters,this.filters*i]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",a,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let c;if(this.unitForgetBias){const h=this.biasInitializer,d=this.filters;c=new(t=class extends Ms{apply(f,b){const w=h.apply([d]),L=Js([d]),x=h.apply([d*2]);return Sw([w,L,x])}},t.className="CustomInit",t)}else c=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*i],null,c,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return Q(()=>{if(e.length!==3)throw new q(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],i=e[1],o=e[2],a=4;0Fn(s),rate:this.dropout,training:n,count:a}));const c=this.dropoutMask,h=(we,Se,xe)=>!Se||!Se[xe]?we:X(Se[xe],we);let d=h(s,c,0),m=h(s,c,1),f=h(s,c,2),b=h(s,c,3);0Fn(i),rate:this.recurrentDropout,training:n,count:a}));const w=this.recurrentDropoutMask;let L=h(i,w,0),x=h(i,w,1),v=h(i,w,2),N=h(i,w,3);const O=3,[E,k,F,U]=hs(this.kernel.read(),a,O),[$,Y,j,Z]=this.useBias?hs(this.bias.read(),a):[null,null,null,null];d=this.inputConv(d,E,$,this.padding),m=this.inputConv(m,k,Y,this.padding),f=this.inputConv(f,F,j,this.padding),b=this.inputConv(b,U,Z,this.padding);const[ie,de,he,ue]=hs(this.recurrentKernel.read(),a,O);L=this.recurrentConv(L,ie),x=this.recurrentConv(x,de),v=this.recurrentConv(v,he),N=this.recurrentConv(N,ue);const me=this.recurrentActivation.apply(be(d,L)),ce=this.recurrentActivation.apply(be(m,x)),ye=be(X(ce,o),X(me,this.activation.apply(be(f,v)))),pe=X(this.recurrentActivation.apply(be(b,N)),this.activation.apply(ye));return[pe,pe,ye]})}getConfig(){const e=super.getConfig(),{units:t}=e,n=cV(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const i=Ji(e,t,this.strides,s||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return n?Ri(i,n,this.dataFormat):i}recurrentConv(e,t){const n=1;return 
Ji(e,t,n,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}gm.className="ConvLSTM2DCell",fe(gm);class pL extends IN{constructor(e){const t=new gm(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}pL.className="ConvLSTM2D",fe(pL);class ym extends lt{constructor(e){super(e);this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(this.noiseShape==null)return this.noiseShape;const t=e.shape,n=[];for(let s=0;s{this.invokeCallHook(e,t);const n=Xe(e);if(0wv(n,this.rate,i,this.seed),()=>n,s);return o}return e})}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}ym.className="Dropout",fe(ym);class mL extends ym{constructor(e){super(e);this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}mL.className="SpatialDropout1D",fe(mL);class fL extends lt{constructor(e){super(e);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.batchInputShape==null&&e.inputShape==null&&e.inputDim!=null){let t=null;e.batchSize!=null&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation),e.useBias!=null&&(this.useBias=e.useBias),this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=gn(e.kernelConstraint),this.biasConstraint=gn(e.biasConstraint),this.kernelRegularizer=zt(e.kernelRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.activityRegularizer=zt(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){e=Nt(e);const t=e[e.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){e=Nt(e);const t=e.slice();return t[t.length-1]=this.units,t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=cv(this.activation.getClassName());let i;return s!=null?i=Ci(n,this.kernel.read(),s,this.bias?this.bias.read():null):(i=Ci(n,this.kernel.read()),this.bias!=null&&(i=Ri(i,this.bias.read())),this.activation!=null&&(i=this.activation.apply(i))),i})}getConfig(){const e={units:this.units,activation:jr(this.activation),useBias:this.useBias,kernelInitializer:Kt(this.kernelInitializer),biasInitializer:Kt(this.biasInitializer),kernelRegularizer:Ct(this.kernelRegularizer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),kernelConstraint:fn(this.kernelConstraint),biasConstraint:fn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}fL.className="Dense",fe(fL);class gL extends lt{constructor(e){e=e||{},super(e),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=Nt(e);for(const t of e.slice(1))if(t==null)throw new q(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],Gr(e,1)]}call(e,t){return Q(()=>{this.invokeCallHook(e,t);let n=Xe(e);if(this.dataFormat==="channelsFirst"&&n.rank>1){const s=[0];for(let i=2;i{this.invokeCallHook(e,t);const n=Xe(e);return this.activation.apply(n)})}getConfig(){const e={activation:jr(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}yL.className="Activation",fe(yL);class bL extends lt{constructor(e){super(e);this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return Q(()=>(e=Xe(e),Rz(e,this.n)))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}bL.className="RepeatVector",fe(bL);class wL extends lt{constructor(e){super(e);this.targetShape=e.targetShape;for(let t=0;t{this.invokeCallHook(e,t);const n=Xe(e),s=n.shape,i=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(i)})}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}wL.className="Reshape",fe(wL);class LL extends lt{constructor(e){super(e);if(e.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=ni(1,e.dims.length+1);if(!ae(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new Ln({ndim:this.dims.length+1})]}computeOutputShape(e){e=Nt(e);const t=e.slice();return this.dims.forEach((n,s)=>{t[s+1]=e[n]}),t}call(e,t){return Ye(Xe(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}LL.className="Permute",fe(LL);class SL extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null?this.maskValue=e.maskValue==null?0:e.maskValue:this.maskValue=0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=Xe(e),s=-1;return ih(Br(n,this.maskValue),s)}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=-1,i=!0,o=ih(Br(n,this.maskValue),s,i),a=n.mul(o.asType(n.dtype));return a})}}SL.className="Masking",fe(SL);class IL extends lt{constructor(e){super(e);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",e.batchInputShape==null&&e.inputShape==null){let t=null;e.batchSize!=null&&(t=e.batchSize),e.inputLength==null?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Et(e.inputLength))}this.inputDim=e.inputDim,wn(this.inputDim,"inputDim"),this.outputDim=e.outputDim,wn(this.outputDim,"outputDim"),this.embeddingsInitializer=Pt(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=zt(e.embeddingsRegularizer),this.activityRegularizer=zt(e.activityRegularizer),this.embeddingsConstraint=gn(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return 
Q(()=>this.maskZero?(e=Xe(e),Br(e,et(e))):null)}computeOutputShape(e){if(e=Nt(e),this.inputLength==null)return[...e,this.outputDim];const t=Et(this.inputLength);if(t.length!==e.length-1)throw new q(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s{this.invokeCallHook(e,t);let n=Xe(e);n.dtype!=="int32"&&(n=Wh(n,"int32"));const s=bv(this.embeddings.read(),n.as1D());return s.reshape(Nt(this.computeOutputShape(n.shape)))})}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Kt(this.embeddingsInitializer),embeddingsRegularizer:Ct(this.embeddingsRegularizer),activityRegularizer:Ct(this.activityRegularizer),embeddingsConstraint:fn(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}IL.className="Embedding",fe(IL);class Vo extends lt{constructor(e){super(e||{});this.supportsMasking=!0}mergeFunction(e){throw new Pe}computeElementwiseOpOutputShape(e,t){if(e==null||t==null)return null;if(e.length1)throw new q(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(e)}.`);let n=e[0]==null?null:e[0].slice(1);for(let i=1;ii.length);e.indexOf(null)===-1&&Vr(s).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return Q(()=>{if(e=e,this.reshapeRequired){const n=[],s=e.map(i=>i.rank);if(s.indexOf(null)===-1){const i=Yr(s);for(let o of e){const a=o.rank;for(let c=0;c1){const d=ni(1,h).concat([0]);n.push(Ye(c,d)),i=!0}else n.push(c)}let o=this.mergeFunction(n);const a=o.rank;if(i){if(a==null){const c=o.shape,h=c.length,d=c[h-1],m=[d].concat(c.slice(0,c.length-1));o=Ye(o.reshape([-1,d]),[1,0]).reshape(m)}else if(a>1){const c=[a-1].concat(ni(0,a-1));o=Ye(o,c)}}return o}}else return this.mergeFunction(e)})}computeOutputShape(e){e=e;let t;e[0]==null?t=null:t=e[0].slice(1);for(let s=1;s{if(t==null)return null;if(!Array.isArray(t))throw new q("`mask` should be an Array");if(!Array.isArray(e))throw new q("`inputs` should be an Array");if(t.length!==e.length)throw new q(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every(s=>s==null))return null;t=t.map(s=>s==null?s:Zn(s,0));let n=t[0];for(let s=1;s{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0];for(let n=1;n{let t=e[0];for(let n=1;n1)throw new q("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return Q(()=>Sw(e,this.axis))}computeOutputShape(e){if(!(Array.isArray(e)&&Array.isArray(e[0])))throw new q("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const i of t.slice(1)){if(n[s]==null||i[s]==null){n[s]=null;break}n[s]+=i[s]}return n}computeMask(e,t){if(t==null)return null;if(!Array.isArray(t))throw new q("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new q("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new q(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return Q(()=>{let n=!0;if(t.forEach(o=>{if(o!=null){n=!1;return}}),n)return null;const s=[];for(let o=0;o3||t.shape.length>3)throw new Pe("batchDot is not implemented for tensors of 4D or higher rank yet");if(A(e.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`),A(e.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof n=="number"&&(n=[n,n]),e.dtype==="complex64"||t.dtype==="complex64")throw new Pe("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,i=t.shape.length;n==null&&(n=[s-1,i-2]);const o=n;return Q(()=>{let a;if(s>i){a=s-i;const h=[];for(let d=0;ds){a=i-s;const h=[];for(let d=0;d0){let h;s>i?h=s+i-3:h=s-1;const d=[];for(let m=h;m"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new Pe("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new q(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(e.length!==2)throw new q(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t=e[0],n=e[1],s;return Array.isArray(this.axes)?s=this.axes.map((i,o)=>eu(i,e[o].shape.length)):s=[eu(this.axes,t.shape.length),eu(this.axes,n.shape.length)],this.normalize&&(t=tm(t,s[0]),n=tm(n,s[1])),lV(t,n,s)}interpretAxes(e,t){let n;return Array.isArray(this.axes)?n=this.axes:n=[eu(this.axes,e.length),eu(this.axes,t.length)],n}computeOutputShape(e){A(Array.isArray(e)&&e.length===2&&Array.isArray(e[0])&&Array.isArray(e[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new Pe("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const i=t.concat(n);return i.length===1&&i.push(1),i}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}xL.className="Dot",fe(xL);class TL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=()=>zp(n.shape,0,this.stddev).add(n),i=Bh(s,()=>n,t.training||!1);return i})}}TL.className="GaussianNoise",fe(TL);class AL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e);if(this.rate>0&&this.rate<1){const 
s=()=>{const i=Math.sqrt(this.rate/(1-this.rate));return n.mul(zp(n.shape,1,i))};return Bh(s,()=>n,t.training||!1)}return n})}}AL.className="GaussianDropout",fe(AL);class vL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||Xe(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Q(()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e),s=()=>{const i=Xe(e),o=1.6732632423543772,a=1.0507009873554805,c=-o*a;let h=Zi(ko(n),this.rate);h=Wh(h,"float32");const d=((1-this.rate)*(1+this.rate*c**2))**-.5,m=-d*c*this.rate,f=i.mul(h).add(h.add(-1).mul(c));return f.mul(d).add(m)};return Bh(s,()=>Xe(e),t.training||!1)}return e})}}vL.className="AlphaDropout",fe(vL);function tu(e,t,n,s,i,o=.001){let a;if(e.rank===2)a=qT(e,t,n,s,i,o);else if(e.rank===3)a=jT(e,t,n,s,i,o);else if(e.rank===4)a=KT(e,t,n,s,i,o);else throw new Pe(`batchNormalization is not implemented for array of rank ${e.rank} yet`);return a}function hV(e,t,n,s,i=.001){return Q(()=>{const o=fp(e,s),a=o.mean,c=o.variance,h=tu(e,a,c,n,t,i);return[h,a,c]})}function uV(e,t,n,s,i=.001){return Q(()=>{const o=fp(e,s),a=o.mean,c=o.variance,h=[];for(const L of ni(0,e.rank))s.indexOf(L)!==-1?h.push(1):h.push(e.shape[L]);const d=a.reshape(h),m=c.reshape(h),f=t==null?null:t.reshape(h),b=n==null?null:n.reshape(h),w=tu(e,d,m,b,f,i);return[w,a,c]})}function dV(e,t,n,s,i=.001){return ae(s.slice().sort(),ni(0,e.rank-1))?hV(e,t,n,s,i):uV(e,t,n,s,i)}class NL extends lt{constructor(e){e==null&&(e={}),super(e),this.supportsMasking=!0,this.axis=e.axis==null?-1:e.axis,this.momentum=e.momentum==null?.99:e.momentum,this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Pt(e.betaInitializer||"zeros"),this.gammaInitializer=Pt(e.gammaInitializer||"ones"),this.movingMeanInitializer=Pt(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=Pt(e.movingVarianceInitializer||"ones"),this.betaConstraint=gn(e.betaConstraint),this.gammaConstraint=gn(e.gammaConstraint),this.betaRegularizer=zt(e.betaRegularizer),this.gammaRegularizer=zt(e.gammaRegularizer)}build(e){e=Nt(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(n==null)throw new q(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new Ln({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return Q(()=>{const n=t.training==null?!1:t.training,s=Xe(e),i=s.shape,o=i.length,a=ni(0,o),c=this.axis>=0?this.axis:this.axis+o;a.splice(c,1);const h=$o(1,o);h[c]=i[c];const d=a.slice();d.sort();const m=!ae(d,ni(0,o).slice(0,o-1)),f=()=>{if(m){const N=this.movingMean.read().reshape(h),O=this.movingVariance.read().reshape(h),E=this.center?this.beta.read().reshape(h):null,k=this.scale?this.gamma.read().reshape(h):null;return tu(s,N,O,E,k,this.epsilon)}else return 
tu(s,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!n)return f();const[b,w,L]=dV(s,this.gamma.read(),this.beta.read(),a,this.epsilon),x=(N,O,E)=>{Q(()=>{const k=1-E,F=N.read(),U=F.sub(O).mul(k);N.write(F.sub(U))})},v=()=>{x(this.movingMean,w,this.momentum),x(this.movingVariance,L,this.momentum)};return v(),b})}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Kt(this.betaInitializer),gammaInitializer:Kt(this.gammaInitializer),movingMeanInitializer:Kt(this.movingMeanInitializer),movingVarianceInitializer:Kt(this.movingVarianceInitializer),betaRegularizer:Ct(this.betaRegularizer),gammaRegularizer:Ct(this.gammaRegularizer),betaConstraint:fn(this.betaConstraint),gammaConstraint:fn(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}NL.className="BatchNormalization",fe(NL);class CL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.axis=e.axis==null?-1:e.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Pt(e.betaInitializer||"zeros"),this.gammaInitializer=Pt(e.gammaInitializer||"ones"),this.betaRegularizer=zt(e.betaRegularizer),this.gammaRegularizer=zt(e.gammaRegularizer),this.supportsMasking=!0}build(e){e=Nt(e);const t=e.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let i=0;i=t)throw new Error(`Invalid axis: ${i}`);if(this.axis.length!==Vr(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map(i=>e[i]),s=!0;this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,s):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,s):this.beta=null,this.built=!0}call(e,t){const n=Xe(e),s=n.shape,i=s.length;return Q(()=>{const o=!0;let{mean:a,variance:c}=fp(n,this.axis,o);const h=$o(1,i);for(const L of this.axis)h[L]=s[L];const d=L=>L!=null&&L.shape.length!==i&&this.axis!==[i-1]?L.reshape(h):L;let m=d(this.gamma.read()),f=d(this.beta.read());const b=[],w=[];for(let L=0;L{if(e.rank!==3)throw new q(`temporalPadding expects input tensor to be 3-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new q(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);const n=[[0,0],t,[0,0]];return vi(e,n)})}function pV(e,t,n){return Q(()=>{if(e.rank!==4)throw new q(`temporalPadding expects input tensor to be 4-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new q("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(n==null&&(n=ei()),n!=="channelsLast"&&n!=="channelsFirst")throw new q(`Unknown data format: ${n}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let s;return n==="channelsFirst"?s=[[0,0],[0,0],t[0],t[1]]:s=[[0,0],t[0],t[1],[0,0]],vi(e,s)})}class RL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.dataFormat=e.dataFormat==null?ei():e.dataFormat,e.padding==null)this.padding=[[1,1],[1,1]];else if(typeof e.padding=="number")this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,e.padding.length!==2)throw new q(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if(typeof e.padding[0]=="number")t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,e.padding[0].length!==2)throw new q(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],e.padding[1].length!==2)throw new q(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){e=Nt(e);let t,n;return this.dataFormat==="channelsFirst"?(e[2]!=null&&e[2]>=0?t=e[2]+this.padding[0][0]+this.padding[0][1]:t=null,e[3]!=null&&e[3]>=0?n=e[3]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],e[1],t,n]):(e[1]!=null&&e[1]>=0?t=e[1]+this.padding[0][0]+this.padding[0][1]:t=null,e[2]!=null&&e[2]>=0?n=e[2]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],t,n,e[3]])}call(e,t){return Q(()=>pV(Xe(e),this.padding,this.dataFormat))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}RL.className="ZeroPadding2D",fe(RL);function bm(e,t,n,s,i,o){return Q(()=>{jt(i),uv(o),vs(s),n==null&&(n=[1,1]),s==null&&(s="valid"),i==null&&(i=ei()),o==null&&(o="max"),e=nL(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=fh(e,t,n,c):a=ah(e,t,n,c),i==="channelsFirst"&&(a=Ye(a,[0,3,1,2])),a})}function xN(e,t,n,s,i,o){return Q(()=>{jt(i),uv(o),vs(s),n==null&&(n=[1,1,1]),s==null&&(s="valid"),i==null&&(i=ei()),o==null&&(o="max"),e=gN(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=Ob(e,t,n,c):a=yb(e,t,n,c),i==="channelsFirst"&&(a=Ye(a,[0,4,1,2,3])),a})}class TN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=2),super(e),typeof e.poolSize=="number")this.poolSize=[e.poolSize];else if(Array.isArray(e.poolSize)&&e.poolSize.length===1&&typeof e.poolSize[0]=="number")this.poolSize=e.poolSize;else throw new q(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);if(wn(this.poolSize,"poolSize"),e.strides==null)this.strides=this.poolSize;else if(typeof e.strides=="number")this.strides=[e.strides];else if(Array.isArray(e.strides)&&e.strides.length===1&&typeof e.strides[0]=="number")this.strides=e.strides;else throw new q(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,vs(this.padding),this.inputSpec=[new Ln({ndim:3})]}computeOutputShape(e){e=Nt(e);const t=oi(e[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return Q(()=>{this.invokeCallHook(e,t),e=$h(Xe(e),2);const n=this.poolingFunction(Xe(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return Mr(n,[2])})}getConfig(){const 
e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class OL extends TN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"max")}}OL.className="MaxPooling1D",fe(OL);class EL extends TN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"avg")}}EL.className="AveragePooling1D",fe(EL);class AN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==2)throw new q(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];wn(this.poolSize,"poolSize"),wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),vs(this.padding),this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){e=Nt(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2];return t=oi(t,this.poolSize[0],this.padding,this.strides[0]),n=oi(n,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return Q(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class DL extends AN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"max")}}DL.className="MaxPooling2D",fe(DL);class kL extends AN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"avg")}}kL.className="AveragePooling2D",fe(kL);class vN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==3)throw new q(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];wn(this.poolSize,"poolSize"),wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),vs(this.padding),this.inputSpec=[new Ln({ndim:5})]}computeOutputShape(e){e=Nt(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[4]:e[3];return t=oi(t,this.poolSize[0],this.padding,this.strides[0]),n=oi(n,this.poolSize[1],this.padding,this.strides[1]),s=oi(s,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return Q(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class FL extends vN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return 
jt(i),vs(s),xN(e,t,n,s,i,"max")}}FL.className="MaxPooling3D",fe(FL);class _L extends vN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),xN(e,t,n,s,i,"avg")}}_L.className="AveragePooling3D",fe(_L);class NN extends lt{constructor(e){super(e);this.inputSpec=[new Ln({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new Pe}}class WL extends NN{constructor(e){super(e||{})}call(e,t){return Q(()=>{const n=Xe(e);return qt(n,1)})}}WL.className="GlobalAveragePooling1D",fe(WL);class $L extends NN{constructor(e){super(e||{})}call(e,t){return Q(()=>{const n=Xe(e);return Qn(n,1)})}}$L.className="GlobalMaxPooling1D",fe($L);class CN extends lt{constructor(e){super(e);this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){return e=e,this.dataFormat==="channelsLast"?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new Pe}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class UL extends CN{call(e,t){return Q(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?qt(n,[1,2]):qt(n,[2,3])})}}UL.className="GlobalAveragePooling2D",fe(UL);class BL extends CN{call(e,t){return Q(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?Qn(n,[1,2]):Qn(n,[2,3])})}}BL.className="GlobalMaxPooling2D",fe(BL);class RN extends lt{constructor(e){super(e);this.layer=e.layer}build(e){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(e){this.layer!=null&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=t.layer,i=ri(s,n);delete t.layer;const o={layer:i};return Object.assign(o,t),new e(o)}}class ML extends RN{constructor(e){super(e);this.supportsMasking=!0}build(e){if(e=Nt(e),e.length<3)throw new q(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){e=Nt(e);const t=[e[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return Q(()=>{e=Xe(e);const n=(o,a)=>{const c=Xe(this.layer.call(o,t));return[c,[]]},s=SN(n,e,[],!1,null,null,!1,!0),i=s[1];return i})}}ML.className="TimeDistributed",fe(ML);function mV(e){Qa(xz,"BidirectionalMergeMode",e)}const fV="concat";class PL extends RN{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=ri(n),t.goBackwards=!(t.goBackwards===!0);const s={};if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=ri(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=e.mergeMode===void 0?fV:e.mergeMode,mV(this.mergeMode),e.weights)throw new Pe("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,this.forwardLayer!=null&&(this.forwardLayer.trainable=e),this.backwardLayer!=null&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t=this.forwardLayer.computeOutputShape(e);Array.isArray(t)&&Array.isArray(t[0])||(t=[t]),t=t;let n,s,i;return this.returnState&&(i=t.slice(1)),n=t[0],n=n,this.mergeMode==="concat"?(n[n.length-1]*=2,s=[n]):this.mergeMode==null?s=[n,n.slice()]:s=[n],this.returnState?this.mergeMode==null?s.concat(i).concat(i.slice()):[n].concat(i).concat(i.slice()):ts(s)}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=LN(e,n,s,this.numConstants);if(e=i.inputs,n=i.initialState,s=i.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(n==null||n.length===0)&&s==null)return super.apply(e,t);const o=[],a=[];if(n!=null){const h=n.length;if(h%2>0)throw new q("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,o.push(...n);const d=n.map(m=>new Ln({shape:m.shape}));this.forwardLayer.stateSpec=d.slice(0,h/2),this.backwardLayer.stateSpec=d.slice(h/2),a.push(...d)}if(s!=null)throw new Pe("Support for constants in Bidirectional layers is not implemented yet.");const c=o[0]instanceof ii;for(const h of o)if(h instanceof ii!==c)throw new q("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const f=super.apply(h,t);return this.inputSpec=m,f}else return super.apply(e,t)}call(e,t){return Q(()=>{const n=t.initialState;let s,i;if(n==null)s=this.forwardLayer.call(e,t),i=this.backwardLayer.call(e,t);else{const c=n.slice(0,n.length/2),h=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:c})),i=this.backwardLayer.call(e,Object.assign(t,{initialState:h}))}let o;this.returnState&&(Array.isArray(s)&&(o=s.slice(1).concat(i.slice(1))),s=s[0],i=i[0]),this.returnSequences&&(i=Ts(i,1));let a;return this.mergeMode==="concat"?a=Sw([s,i]):this.mergeMode==="sum"?a=be(s,i):this.mergeMode==="ave"?a=X(.5,be(s,i)):this.mergeMode==="mul"?a=X(s,i):this.mergeMode==null&&(a=[s,i]),this.returnState?this.mergeMode==null?a.concat(o):[a].concat(o):a})}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){Bo(this.forwardLayer.name,()=>{this.forwardLayer.build(e)}),Bo(this.backwardLayer.name,()=>{this.backwardLayer.build(e)}),this.built=!0}computeMask(e,t){Array.isArray(t)&&(t=t[0]);let n;if(this.returnSequences?this.mergeMode==null?n=[t,t]:n=t:this.mergeMode==null?n=[null,null]:n=null,this.returnState){const s=this.forwardLayer.states,i=s.map(o=>null);return Array.isArray(n)?n.concat(i).concat(i):[n].concat(i).concat(i)}else return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(e),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=ri(t.layer);if(delete t.layer,t.numConstants!=null)throw new Pe("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new e(s)}}PL.className="Bidirectional",fe(PL);function gV(e){return new nc(e)}function yV(e){return new Qw(e)}function bV(e){return new Xw(e)}function wV(e){return new Jw(e)}function LV(e){return new Zw(e)}function SV(e){return new tL(e)}function IV(e){return new eL(e)}function xV(e){return new dm(e)}function TV(e){return new Hh(e)}function AV(e){return new rL(e)}function vV(e){return new um(e)}function NV(e){return new oL(e)}function CV(e){return new aL(e)}function RV(e){return new cL(e)}function OV(e){return new lL(e)}function EV(e){return new yL(e)}function DV(e){return new fL(e)}function kV(e){return new ym(e)}function FV(e){return new mL(e)}function _V(e){return new gL(e)}function WV(e){return new bL(e)}function $V(e){return new wL(e)}function UV(e){return new LL(e)}function BV(e){return new IL(e)}function MV(e){return new jh(e)}function PV(e){return new Xh(e)}function zV(e){return new Qh(e)}function VV(e){return new Jh(e)}function GV(e){return new Zh(e)}function YV(e){return new Kh(e)}function HV(e){return new xL(e)}function qV(e){return new NL(e)}function jV(e){return new CL(e)}function KV(e){return new RL(e)}function zL(e){return new EL(e)}function XV(e){return zL(e)}function JV(e){return zL(e)}function VL(e){return new kL(e)}function ZV(e){return VL(e)}function QV(e){return VL(e)}function GL(e){return new _L(e)}function eG(e){return GL(e)}function tG(e){return GL(e)}function nG(e){return new WL(e)}function sG(e){return new UL(e)}function ON(e){return new $L(e)}function EN(e){return new BL(e)}function DN(e){return new OL(e)}function kN(e){return new DL(e)}function iG(e){return new FL(e)}function rG(e){return new uL(e)}function oG(e){return new mm(e)}function aG(e){return new dL(e)}function cG(e){return new qh(e)}function lG(e){return new hL(e)}function hG(e){return new pm(e)}function uG(e){return new pL(e)}function dG(e){return new gm(e)}function pG(e){return new Ei(e)}function mG(e){return new fm(e)}function fG(e){return new PL(e)}function gG(e){return new ML(e)}const yG=ON,bG=EN,wG=DN,LG=kN;function SG(e){return new TL(e)}function IG(e){return new AL(e)}function xG(e){return new vL(e)}function TG(e){return new SL(e)}var 
AG=Object.freeze({__proto__:null,inputLayer:gV,elu:yV,reLU:bV,leakyReLU:wV,prelu:LV,softmax:SV,thresholdedReLU:IV,conv1d:xV,conv2d:TV,conv2dTranspose:AV,conv3d:vV,separableConv2d:NV,cropping2D:CV,upSampling2d:RV,depthwiseConv2d:OV,activation:EV,dense:DV,dropout:kV,spatialDropout1d:FV,flatten:_V,repeatVector:WV,reshape:$V,permute:UV,embedding:BV,add:MV,average:PV,concatenate:zV,maximum:VV,minimum:GV,multiply:YV,dot:HV,batchNormalization:qV,layerNormalization:jV,zeroPadding2d:KV,averagePooling1d:zL,avgPool1d:XV,avgPooling1d:JV,averagePooling2d:VL,avgPool2d:ZV,avgPooling2d:QV,averagePooling3d:GL,avgPool3d:eG,avgPooling3d:tG,globalAveragePooling1d:nG,globalAveragePooling2d:sG,globalMaxPooling1d:ON,globalMaxPooling2d:EN,maxPooling1d:DN,maxPooling2d:kN,maxPooling3d:iG,gru:rG,gruCell:oG,lstm:aG,lstmCell:cG,simpleRNN:lG,simpleRNNCell:hG,convLstm2d:uG,convLstm2dCell:dG,rnn:pG,stackedRNNCells:mG,bidirectional:fG,timeDistributed:gG,globalMaxPool1d:yG,globalMaxPool2d:bG,maxPool1d:wG,maxPool2d:LG,Layer:lt,RNN:Ei,RNNCell:ac,input:eN,gaussianNoise:SG,gaussianDropout:IG,alphaDropout:xG,masking:TG});function vG(e,t){return $w(e,t)}function NG(e,t){return _v(e,t)}function CG(e,t){return Wv(e,t)}function RG(e,t){return Uw(e,t)}function OG(e,t){return Bw(e,t)}function EG(e,t){return Fv(e,t)}function DG(e,t){return b3(e,t)}function kG(e,t){return im(e,t)}function FG(e,t){return ic(e,t)}function _G(e,t){return qr(e,t)}function WG(e,t){return qr(e,t)}function $G(e,t){return qr(e,t)}function UG(e,t){return ir(e,t)}function BG(e,t){return ir(e,t)}function MG(e,t){return ir(e,t)}var PG=Object.freeze({__proto__:null,binaryAccuracy:vG,binaryCrossentropy:NG,sparseCategoricalAccuracy:CG,categoricalAccuracy:RG,categoricalCrossentropy:OG,precision:EG,recall:DG,cosineProximity:kG,meanAbsoluteError:FG,meanAbsolutePercentageError:_G,MAPE:WG,mape:$G,meanSquaredError:UG,MSE:BG,mse:MG});var zG=Object.freeze({__proto__:null,modelFromJSON:J3});function VG(e){return new Gh(e)}function GG(e){return rV(e)}function YG(e){return oV(e)}var HG=Object.freeze({__proto__:null,l1l2:VG,l1:GG,l2:YG});class FN extends sc{constructor(){super(...arguments);this.model=null}setModel(e){if(!(e instanceof rr))throw new Error("model must be a LayersModel, not some other Container");this.model=e}}function wm(e,t){return et}class WN extends FN{constructor(e){super();if(e==null&&(e={}),e.restoreBestWeights)throw new Pe("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=e.monitor||"val_loss",this.minDelta=Math.abs(e.minDelta||0),this.patience=e.patience||0,this.verbose=e.verbose||0,this.mode=e.mode||"auto",this.baseline=e.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=wm:this.mode==="max"?this.monitorFunc=_N:this.monitor.indexOf("acc")!==-1?this.monitorFunc=_N:this.monitorFunc=wm,this.monitorFunc===wm&&(this.minDelta*=-1)}async onTrainBegin(e){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===wm?Infinity:-Infinity}async onEpochEnd(e,t){await Hr(t);const n=this.getMonitorValue(t);if(n==null)return;this.monitorFunc(n-this.minDelta,this.best)?(this.best=n,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=e,this.model.stopTraining=!0))}async onTrainEnd(e){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(e){e==null&&(e={});const t=e[this.monitor];return t==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(e)}`),t}}function qG(e){return new WN(e)}const jG={earlyStopping:qG};var ai;(function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(ai||(ai={}));var $N;(function(e){let t;(function(n){n[n.LEGACY=0]="LEGACY",n[n.V1=1]="V1",n[n.V2=2]="V2"})(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))})($N||($N={}));const YL={};function KG(e,t){const n={tfOpName:e,category:"custom",inputs:[],attrs:[],customExecutor:t};YL[e]=n}function UN(e){return YL[e]}function XG(e){delete YL[e]}function R(e,t,n,s,i){const o=t.inputParams[e];if(o&&o.inputIndexStart!==void 0){const c=o.inputIndexStart,h=o.inputIndexEnd===0?void 0:o.inputIndexEnd===void 0?c+1:o.inputIndexEnd;if(o.type==="tensor")return ss(t.inputNames[o.inputIndexStart],n,s,i);if(o.type==="tensors"){const f=t.inputNames.slice(c,h);return f.map(b=>ss(b,n,s,i))}const d=ss(t.inputNames.slice(c)[0],n,s,i),m=d.dataSync();return o.type==="number"?m[0]:Ls(d.shape,m)}const a=t.attrParams[e];return a&&a.value}function ss(e,t,n,s){const[i,o]=ds(e);if(s!=null){const c=s.getHashTableHandleByName(i);if(c!=null)return c}const a=n.currentContextIds.find(c=>!!t[Lm(i,c)]);return a!==void 0?t[Lm(i,a)][o]:void 0}function JG(e,t,n){return t[Lm(e,n.currentContextId)]}function or(e,t){const[n,s]=ds(e);return[Lm(n,t&&t.currentContextId),s]}function Lm(e,t){return t?`${e}-${t}`:e}function ds(e){const t=e.split(":");if(t.length===1)return[e,0];const n=t[0];return[n,Number(t[t.length-1])]}function Cte(e,t){const n=[];for(let s=0;sn.json));this.opMappers=t.reduce((n,s)=>(n[s.tfOpName]=s,n),{})}transformGraph(e,t={}){const 
n=e.node,s=[],i=[],o=[],a=n.reduce((L,x)=>(L[x.name]=this.mapNode(x),x.op.startsWith("Placeholder")?s.push(L[x.name]):x.op==="Const"?i.push(L[x.name]):(x.input==null||x.input.length===0)&&o.push(L[x.name]),L),{});let c=[];const h=[];let d={},m={};t!=null&&(d=this.mapSignatureEntries(t.inputs),m=this.mapSignatureEntries(t.outputs));const f=Object.keys(a);f.forEach(L=>{const x=a[L];x.inputNames.forEach(v=>{const[N]=or(v);x.inputs.push(a[N]),a[N].children.push(x)})}),Object.keys(m).length===0?f.forEach(L=>{const x=a[L];x.children.length===0&&h.push(x)}):Object.keys(m).forEach(L=>{const[x]=or(L),v=a[x];v!=null&&(v.signatureKey=m[L],h.push(v))}),Object.keys(d).length>0?Object.keys(d).forEach(L=>{const[x]=or(L),v=a[x];v&&(v.signatureKey=d[L],c.push(v))}):c=s;let b={};e.library!=null&&e.library.function!=null&&(b=e.library.function.reduce((L,x)=>(L[x.signature.name]=this.mapFunction(x),L),{}));const w={nodes:a,inputs:c,outputs:h,weights:i,placeholders:s,signature:t,functions:b};return o.length>0&&(w.initNodes=o),w}mapSignatureEntries(e){return Object.keys(e||{}).reduce((t,n)=>(t[e[n].name]=n,t),{})}mapNode(e){const t=UN(e.op)||this.opMappers[e.op]||{};e.attr==null&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map(s=>s.startsWith("^")?s.substr(1):s),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return t.inputs!=null&&(n.inputParams=t.inputs.reduce((s,i)=>(s[i.name]={type:i.type,inputIndexStart:i.start,inputIndexEnd:i.end},s),{})),t.attrs!=null&&(n.attrParams=t.attrs.reduce((s,i)=>{const o=i.type;let a;switch(i.type){case"string":a=HL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=HL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"string[]":a=eS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=eS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number":a=jL(e.attr,i.tfName,i.defaultValue||0),a===void 0&&!!i.tfDeprecatedName&&(a=jL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number[]":a=QL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=QL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool":a=qL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=qL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool[]":a=nS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=nS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape":a=ZL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=ZL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape[]":a=tS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=tS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype":a=XL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=XL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype[]":a=JL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"func":a=PN(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=PN(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${i.type} for op: ${e.op}`)}return s[i.name]={value:a,type:o},s},{})),n}mapFunction(e){const t=e.nodeDef,n=[],s=[];let i={};t!=null&&(i=t.reduce((m,f)=>(m[f.name]=this.mapNode(f),f.op==="Const"&&s.push(m[f.name]),m),{}));const 
o=[],a=[];e.signature.inputArg.forEach(m=>{const[f]=or(m.name),b={name:f,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:KL(m.type),type:"dtype"}},children:[]};b.signatureKey=m.name,o.push(b),i[f]=b});const c=Object.keys(i);c.forEach(m=>{const f=i[m];f.inputNames.forEach(b=>{const[w]=or(b);f.inputs.push(i[w]),i[w].children.push(f)})});const h=e.ret;e.signature.outputArg.forEach(m=>{const[f,b]=or(h[m.name]),w=i[f];w!=null&&(w.defaultOutput=b,a.push(w))});const d=this.mapArgsToSignature(e);return{nodes:i,inputs:o,outputs:a,weights:s,placeholders:n,signature:d}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n),t),{}),outputs:e.signature.outputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t),{})}}mapArgToTensorInfo(e,t){let n=e.name;return t!=null&&(n=t[n]),{name:n,dtype:e.type}}}function DY(e){const t=oe().global;if(typeof t.atob!="undefined")return t.atob(e);if(typeof Buffer!="undefined")return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function MN(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):DY(e);return t?n:n.toLowerCase()}function HL(e,t,n,s=!1){const i=e[t];return i!=null?MN(i.s,s):n}function qL(e,t,n){const s=e[t];return s?s.b:n}function jL(e,t,n){const s=e[t]||{},i=s.i!=null?s.i:s.f!=null?s.f:n;return typeof i=="number"?i:parseInt(i,10)}function KL(e){typeof e=="string"&&(e=ai[e]);switch(e){case ai.DT_FLOAT:return"float32";case ai.DT_INT32:case ai.DT_INT64:case ai.DT_INT8:case ai.DT_UINT8:return"int32";case ai.DT_BOOL:return"bool";case ai.DT_DOUBLE:return"float32";case ai.DT_STRING:return"string";default:return null}}function PN(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function XL(e,t,n){const s=e[t];return s&&s.type?KL(s.type):n}function JL(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map(i=>KL(i)):n}function zN(e){return e.unknownRank?void 0:e.dim!=null?e.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function ZL(e,t,n){const s=e[t];return s&&s.shape?zN(s.shape):n}function QL(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map(i=>typeof i=="number"?i:parseInt(i,10)):n}function eS(e,t,n,s=!1){const i=e[t];return i&&i.list&&i.list.s?i.list.s.map(o=>MN(o,s)):n}function tS(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map(i=>zN(i)):n}function nS(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class kY{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map(s=>this.getInput(s)),e.rawAttrs!=null&&(this.attrs=Object.keys(e.rawAttrs).reduce((s,i)=>(s[i]=this.getAttr(i),s),{}))}getInput(e){return ss(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(n.tensor!=null)return ss(e,this.tensorMap,this.context);if(n.i!=null||n.f!=null)return jL(this.node.rawAttrs,e,t);if(n.s!=null)return HL(this.node.rawAttrs,e,t);if(n.b!=null)return qL(this.node.rawAttrs,e,t);if(n.shape!=null)return ZL(this.node.rawAttrs,e,t);if(n.type!=null)return XL(this.node.rawAttrs,e,t);if(n.list!=null){if(n.list.i!=null||n.list.f!=null)return QL(this.node.rawAttrs,e,t);if(n.list.s!=null)return eS(this.node.rawAttrs,e,t);if(n.list.shape!=null)return tS(this.node.rawAttrs,e,t);if(n.list.b!=null)return nS(this.node.rawAttrs,e,t);if(n.list.type!=null)return 
JL(this.node.rawAttrs,e,t)}return t}}const FY=(e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[be(R("a",e,t,n),R("b",e,t,n))];case"AddN":return[YT(R("tensors",e,t,n))];case"FloorMod":case"Mod":return[mp(R("a",e,t,n),R("b",e,t,n))];case"Mul":return[X(R("a",e,t,n),R("b",e,t,n))];case"RealDiv":case"Div":return[We(R("a",e,t,n),R("b",e,t,n))];case"DivNoNan":return[xb(R("a",e,t,n),R("b",e,t,n))];case"FloorDiv":return[Zd(R("a",e,t,n),R("b",e,t,n))];case"Sub":return[Re(R("a",e,t,n),R("b",e,t,n))];case"Minimum":return[Oo(R("a",e,t,n),R("b",e,t,n))];case"Maximum":return[$s(R("a",e,t,n),R("b",e,t,n))];case"Pow":return[Zs(R("a",e,t,n),R("b",e,t,n))];case"SquaredDifference":return[Ih(R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Rte="arithmetic";const _Y=(e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[dn(R("x",e,t,n))];case"Acos":return[ob(R("x",e,t,n))];case"Acosh":return[ab(R("x",e,t,n))];case"Asin":return[hb(R("x",e,t,n))];case"Asinh":return[ub(R("x",e,t,n))];case"Atan":return[db(R("x",e,t,n))];case"Atan2":return[pb(R("x",e,t,n),R("y",e,t,n))];case"Atanh":return[mb(R("x",e,t,n))];case"Ceil":return[bb(R("x",e,t,n))];case"Complex":return[ji(R("real",e,t,n),R("imag",e,t,n))];case"Cos":return[hh(R("x",e,t,n))];case"Cosh":return[op(R("x",e,t,n))];case"Elu":return[Ua(R("x",e,t,n))];case"Erf":return[Tb(R("x",e,t,n))];case"Exp":return[Is(R("x",e,t,n))];case"Expm1":return[Ab(R("x",e,t,n))];case"Floor":return[Ma(R("x",e,t,n))];case"Log":return[cs(R("x",e,t,n))];case"Log1p":return[hp(R("x",e,t,n))];case"Imag":return[dh(R("x",e,t,n))];case"Neg":return[Ht(R("x",e,t,n))];case"Reciprocal":return[_b(R("x",e,t,n))];case"Real":return[Ga(R("x",e,t,n))];case"Relu":return[Ni(R("x",e,t,n))];case"Round":return[$b(R("x",e,t,n))];case"Selu":return[bp(R("x",e,t,n))];case"Sigmoid":return[Ti(R("x",e,t,n))];case"Sin":return[wp(R("x",e,t,n))];case"Sign":return[Bb(R("x",e,t,n))];case"Sinh":return[Lp(R("x",e,t,n))];case"Softplus":return[za(R("x",e,t,n))];case"Sqrt":return[Nn(R("x",e,t,n))];case"Square":return[At(R("x",e,t,n))];case"Tanh":return[$a(R("x",e,t,n))];case"Tan":return[zb(R("x",e,t,n))];case"Relu6":case"ClipByValue":return[Jn(R("x",e,t,n),R("clipValueMin",e,t,n),R("clipValueMax",e,t,n))];case"Rsqrt":return[yp(ss(e.inputNames[0],t,n))];case"Prod":return[gp(R("x",e,t,n),R("axes",e,t,n))];case"LeakyRelu":return[lp(R("x",e,t,n),R("alpha",e,t,n))];case"Prelu":return[yh(R("x",e,t,n),R("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ote="basic_math";function zs(e,t,n=""){A(WY(e,t),()=>n+` Shapes ${e} and ${t} must match`)}function WY(e,t){if(e.length!==t.length)return!1;for(let n=0;n{(e==null||!e.has(t.tensor.id))&&t.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map(t=>this.read(t))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is 
not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, + because the value dtype is ${t.dtype}, but TensorArray dtype is ${this.dtype}.`);if(this.size()===0&&(this.elementShape==null||this.elementShape.length===0)&&(this.elementShape=t.shape),zs(this.elementShape,t.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${e}.`),n.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been read.`);if(n.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been written.`);n.tensor=t,bn(t),n.written=!0,this.tensors[e]=n}writeMany(e,t){if(e.length!==t.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${e.length} is not the same as tensors size: ${t.length}.`);e.forEach((n,s)=>this.write(n,t[s]))}gather(e,t){if(!!t&&t!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${t}`);if(e)e=e.slice(0,this.size());else{e=[];for(let s=0;s=this.maxSize)throw new Error(`Max index must be < array size (${n} vs. ${this.maxSize})`);this.writeMany(e,Qs(t,0))}split(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);let n=0;const s=e.map(c=>(n+=c,n));if(n!==t.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is - ${n}, and tensor's shape is: ${t.shape}`);if(!this.dynamicSize&&e.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${e.length}), and the TensorArray is not marked as dynamically resizeable`);const i=n===0?0:t.size/n,o=[];ee(()=>{t=K(t,[1,n,i]);for(let c=0;c{if(n!==i.dtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${i.dtype}`);Ms(t,i.shape,"TensorList shape mismatch: "),Nn(i)}),this.idTensor=Ne(0),this.maxNumElements=s,Nn(this.idTensor)}get id(){return this.idTensor.id}copy(){return new Ph([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(e){this.tensors.forEach(t=>{(e==null||!e.has(t.id))&&t.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(e,t,n=-1){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(n!==-1&&this.tensors.length!==n)throw new Error(`Operation expected a list with ${n} elements but got a list with ${this.tensors.length} elements.`);return Ms(e,this.elementShape,"TensorList shape mismatch: "),ee(()=>{const s=this.tensors.map(i=>K(i,e));return is(s,0)})}popBack(e,t){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(this.size()===0)throw new Error("Trying to pop from an empty list.");const n=this.tensors.pop();return Ms(n.shape,e,"TensorList shape mismatch: "),K(n,e)}pushBack(e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(Ms(e.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");Nn(e),this.tensors.push(e)}resize(e){if(e<0)throw new Error(`TensorListResize expects size to be non-negative. 
Got: ${e}`);if(this.maxNumElements!==-1&&e>this.maxNumElements)throw new Error(`TensorListResize input size ${e} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=e}getItem(e,t,n){if(n!==this.elementDtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${this.elementDtype}`);if(e<0||e>this.tensors.length)throw new Error(`Trying to access element ${e} in a list with ${this.tensors.length} elements.`);if(this.tensors[e]==null)throw new Error(`element at index ${e} is null.`);return Ms(this.tensors[e].shape,t,"TensorList shape mismatch: "),this.tensors[e]}setItem(e,t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(e<0||this.maxNumElements!==-1&&e>=this.maxNumElements)throw new Error(`Trying to set element ${e} in a list with max ${this.maxNumElements} elements.`);Ms(this.elementShape,t.shape,"TensorList shape mismatch: "),Nn(t),this.tensors[e]=t}gather(e,t,n){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);return Ms(this.elementShape,n,"TensorList shape mismatch: "),e=e.slice(0,this.size()),e.length===0?en([],[0].concat(this.elementShape)):ee(()=>{const s=e.map(i=>K(this.tensors[i],n));return is(s,0)})}concat(e,t){if(!!e&&e!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${e}`);return Ms(this.elementShape,t,"TensorList shape mismatch: "),this.size()===0?en([],[0].concat(this.elementShape)):ee(()=>{const n=this.tensors.map(s=>K(s,t));return Mt(n,0)})}}function eY(e,t,n){const s=e.dtype;if(e.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${e.shape}`);if(e.dtype!==n)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${n}`);const i=e.shape.slice(1);Ms(i,t,"TensorList shape mismatch: ");const o=Oi(e);return new Ph(o,t,s)}function tY(e,t,n){return new Ph([],e,t,n)}function nY(e,t,n,s){if(t.length!==e.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${e.shape[0]}`);const i=Math.max(...t);if(s!=null&&s!==-1&&i>=s)throw new Error(`Max index must be < array size (${i} vs. ${s})`);const o=new Ph([],n,e.dtype,s),a=Oi(e,0);return t.forEach((c,h)=>{o.setItem(c,a[h])}),o}function sY(e,t,n){let s=0;const i=t.map(h=>(s+=h,s));if(s!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to + ${n}, and tensor's shape is: ${t.shape}`);if(!this.dynamicSize&&e.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. 
${e.length}), and the TensorArray is not marked as dynamically resizeable`);const i=n===0?0:t.size/n,o=[];Q(()=>{t=K(t,[1,n,i]);for(let c=0;c{if(n!==i.dtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${i.dtype}`);zs(t,i.shape,"TensorList shape mismatch: "),bn(i)}),this.idTensor=Ce(0),this.maxNumElements=s,bn(this.idTensor)}get id(){return this.idTensor.id}copy(){return new nu([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(e){this.tensors.forEach(t=>{(e==null||!e.has(t.id))&&t.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(e,t,n=-1){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(n!==-1&&this.tensors.length!==n)throw new Error(`Operation expected a list with ${n} elements but got a list with ${this.tensors.length} elements.`);return zs(e,this.elementShape,"TensorList shape mismatch: "),Q(()=>{const s=this.tensors.map(i=>K(i,e));return es(s,0)})}popBack(e,t){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(this.size()===0)throw new Error("Trying to pop from an empty list.");const n=this.tensors.pop();return zs(n.shape,e,"TensorList shape mismatch: "),K(n,e)}pushBack(e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(zs(e.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");bn(e),this.tensors.push(e)}resize(e){if(e<0)throw new Error(`TensorListResize expects size to be non-negative. Got: ${e}`);if(this.maxNumElements!==-1&&e>this.maxNumElements)throw new Error(`TensorListResize input size ${e} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=e}getItem(e,t,n){if(n!==this.elementDtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${this.elementDtype}`);if(e<0||e>this.tensors.length)throw new Error(`Trying to access element ${e} in a list with ${this.tensors.length} elements.`);if(this.tensors[e]==null)throw new Error(`element at index ${e} is null.`);return zs(this.tensors[e].shape,t,"TensorList shape mismatch: "),this.tensors[e]}setItem(e,t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(e<0||this.maxNumElements!==-1&&e>=this.maxNumElements)throw new Error(`Trying to set element ${e} in a list with max ${this.maxNumElements} elements.`);zs(this.elementShape,t.shape,"TensorList shape mismatch: "),bn(t),this.tensors[e]=t}gather(e,t,n){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);return zs(this.elementShape,n,"TensorList shape mismatch: "),e=e.slice(0,this.size()),e.length===0?sn([],[0].concat(this.elementShape)):Q(()=>{const s=e.map(i=>K(this.tensors[i],n));return es(s,0)})}concat(e,t){if(!!e&&e!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${e}`);return zs(this.elementShape,t,"TensorList shape mismatch: "),this.size()===0?sn([],[0].concat(this.elementShape)):Q(()=>{const n=this.tensors.map(s=>K(s,t));return Yt(n,0)})}}function UY(e,t,n){const s=e.dtype;if(e.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${e.shape}`);if(e.dtype!==n)throw new Error(`Invalid data 
types; op elements ${e.dtype}, but list elements ${n}`);const i=e.shape.slice(1);zs(i,t,"TensorList shape mismatch: ");const o=Qs(e);return new nu(o,t,s)}function BY(e,t,n){return new nu([],e,t,n)}function MY(e,t,n,s){if(t.length!==e.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${e.shape[0]}`);const i=Math.max(...t);if(s!=null&&s!==-1&&i>=s)throw new Error(`Max index must be < array size (${i} vs. ${s})`);const o=new nu([],n,e.dtype,s),a=Qs(e,0);return t.forEach((c,h)=>{o.setItem(c,a[h])}),o}function PY(e,t,n){let s=0;const i=t.map(h=>(s+=h,s));if(s!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is - ${s}, and tensor's shape is: ${e.shape}`);const o=s===0?0:e.size/s,a=ee(()=>{const h=[];e=K(e,[1,s,o]);for(let d=0;d{switch(e.op){case"If":case"StatelessIf":{const s=R("thenBranch",e,t,n),i=R("elseBranch",e,t,n),o=R("cond",e,t,n),a=R("args",e,t,n),c=await o.data();return c[0]?n.functionMap[s].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap):n.functionMap[i].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=R("body",e,t,n),i=R("cond",e,t,n),o=R("args",e,t,n),a=await n.functionMap[i].executeFunctionAsync(o,n.tensorArrayMap,n.tensorListMap),c=o.map(m=>m.id);let h=await a[0].data();a.forEach(m=>{!m.kept&&c.indexOf(m.id)===-1&&m.dispose()});let d=o;for(;h[0];){const m=d;d=await n.functionMap[s].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);const y=d.map(w=>w.id);m.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()});const b=await n.functionMap[i].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);h=await b[0].data(),b.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&y.indexOf(w.id)===-1&&w.dispose()})}return d}case"LoopCond":{const s=R("pred",e,t,n);return[ir(s)]}case"Switch":{const s=R("pred",e,t,n);let i=R("data",e,t,n);return i.kept||(i=ir(i)),(await s.data())[0]?[void 0,i]:[i,void 0]}case"Merge":{const s=e.inputNames.find(i=>Xn(i,t,n)!==void 0);if(s){const i=Xn(s,t,n);return[ir(i)]}return}case"Enter":{const s=R("frameName",e,t,n),i=R("tensor",e,t,n);return n.enterFrame(s),[ir(i)]}case"Exit":{const s=R("tensor",e,t,n);return n.exitFrame(),[ir(s)]}case"NextIteration":{const s=R("tensor",e,t,n);return n.nextIteration(),[ir(s)]}case"TensorArrayV3":{const s=R("size",e,t,n),i=R("dtype",e,t,n),o=R("elementShape",e,t,n),a=R("dynamicSize",e,t,n),c=R("clearAfterRead",e,t,n),h=R("identicalElementShapes",e,t,n),d=R("name",e,t,n),m=new QV(d,i,s,o,h,a,c);return n.addTensorArray(m),[m.idTensor,Ne(1)]}case"TensorArrayWriteV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.write(i,o),[a.idTensor]}case"TensorArrayReadV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=n.getTensorArray(s.id);return[o.read(i)]}case"TensorArrayGatherV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("dtype",e,t,n),a=n.getTensorArray(s.id);return[a.gather(i,o)]}case"TensorArrayScatterV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.scatter(i,o),[a.idTensor]}case"TensorArrayConcatV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id),o=R("dtype",e,t,n);return[i.concat(o)]}case"TensorArraySplitV3":{const s=R("tensorArrayId",e,t,n),i=R("tensor",e,t,n),o=R("lengths",e,t,n),a=n.getTensorArray(s.id);return a.split(o,i),[a.idTensor]}case"TensorArraySizeV3":{const 
s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return[Ne(i.size(),"int32")]}case"TensorArrayCloseV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return i.clearAndClose(),[i.idTensor]}case"TensorListSetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorList(s.id);return a.setItem(i,o),[a.idTensor]}case"TensorListGetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.getItem(i,o,a)]}case"TensorListScatterV2":case"TensorListScatter":{const s=R("indices",e,t,n),i=R("tensor",e,t,n),o=R("elementShape",e,t,n),a=R("numElements",e,t,n),c=nY(i,s,o,a);return n.addTensorList(c),[c.idTensor]}case"TensorListReserve":{const s=R("elementShape",e,t,n),i=R("elementDType",e,t,n),o=R("numElements",e,t,n),a=tY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListGather":{const s=R("tensorListId",e,t,n),i=R("indices",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.gather(i,a,o)]}case"TensorListStack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=R("numElements",e,t,n),c=n.getTensorList(s.id);return[c.stack(i,o,a)]}case"TensorListFromTensor":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=eY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListConcat":{const s=R("tensorListId",e,t,n),i=n.getTensorList(s.id),o=R("dtype",e,t,n),a=R("elementShape",e,t,n);return[i.concat(o,a)]}case"TensorListPushBack":{const s=R("tensorListId",e,t,n),i=R("tensor",e,t,n),o=n.getTensorList(s.id);return o.pushBack(i),[o.idTensor]}case"TensorListPopBack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=n.getTensorList(s.id);return[a.popBack(i,o)]}case"TensorListSplit":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("lengths",e,t,n),a=sY(s,o,i);return n.addTensorList(a),[a.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},HQ="control";function AN(e,t,n){const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=s==="fusedbatchnorm",h=R("numArgs",e,t,n);if(o){if(a&&h!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&h!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(c)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const d=R("strides",e,t,n),m=Vp(e,t,n),y=R("dataFormat",e,t,n).toUpperCase(),b=R("dilations",e,t,n),[w,L]=R("args",e,t,n);return{stride:d,pad:m,dataFormat:y,dilations:b,biasArg:w,preluArg:L,activationFunc:i}}const rY=(e,t,n)=>{switch(e.op){case"Conv1D":{const s=R("stride",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilation",e,t,n);return[Nd(R("x",e,t,n),R("filter",e,t,n),s,i,o,a)]}case"Conv2D":{const 
s=R("strides",e,t,n),i=Vp(e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[ji(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,o,[a[1],a[2]])]}case"_FusedConv2D":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=AN(e,t,n);return[vb({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=AN(e,t,n);return[bA({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=R("outputShape",e,t,n),i=R("strides",e,t,n),o=Vp(e,t,n);return[Cd(R("x",e,t,n),R("filter",e,t,n),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const s=R("strides",e,t,n),i=Vp(e,t,n),o=R("dilations",e,t,n),a=R("dataFormat",e,t,n).toUpperCase();return[wo(R("input",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,a,[o[1],o[2]])]}case"Conv3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[Zy(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2],s[3]],i,o,[a[1],a[2],a[3]])]}case"AvgPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[Hl(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[eh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n),a=R("includeBatchInIndex",e,t,n),{result:c,indexes:h}=VT(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i,a);return[c,h]}case"AvgPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[jy(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[cb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dilations",e,t,n),a=s[1],c=s[2],h=o[1],d=o[2];return[eb(R("x",e,t,n),R("filter",e,t,n),[a,c],i,[h,d],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},qQ="convolution";const oY=(e,t,n)=>{switch(e.op){case"Fill":{const s=R("shape",e,t,n),i=R("dtype",e,t,n),o=R("value",e,t,n);return[Xl(s,o,i)]}case"LinSpace":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("num",e,t,n);return[PT(s,i,o)]}case"Multinomial":{const s=R("logits",e,t,n),i=R("numSamples",e,t,n),o=R("seed",e,t,n);return[YT(s,i,o)]}case"OneHot":{const s=R("indices",e,t,n),i=R("depth",e,t,n),o=R("onValue",e,t,n),a=R("offValue",e,t,n);return[fo(s,i,o,a)]}case"Ones":return[Qs(R("shape",e,t,n),R("dtype",e,t,n))];case"OnesLike":return[On(R("x",e,t,n))];case"RandomUniform":return[vo(R("shape",e,t,n),R("minval",e,t,n),R("maxval",e,t,n),R("dtype",e,t,n))];case"Range":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("step",e,t,n);return[sh(s,i,o,R("dtype",e,t,n))]}case"TruncatedNormal":{const s=R("shape",e,t,n),i=R("mean",e,t,n),o=R("stdDev",e,t,n),a=R("seed",e,t,n);return[ch(s,i,o,R("dtype",e,t,n),a)]}case"Zeros":return[ct(R("shape",e,t,n),R("dtype",e,t,n))];case"ZerosLike":return[Qe(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},jQ="creation";function FL(e,t,n){const 
s=R("boxes",e,t,n),i=R("scores",e,t,n),o=R("maxOutputSize",e,t,n),a=R("iouThreshold",e,t,n),c=R("scoreThreshold",e,t,n),h=R("softNmsSigma",e,t,n);return{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}}const aY=async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=FL(e,t,n),d=await Wr.nonMaxSuppressionWithScoreAsync(s,i,o,a,c,h);return[d.selectedIndices,d.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=FL(e,t,n),h=R("padToMaxOutputSize",e,t,n),d=await Wr.nonMaxSuppressionPaddedAsync(s,i,o,a,c,h);return[d.selectedIndices,d.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=FL(e,t,n);return[await Wr.nonMaxSuppressionAsync(s,i,o,a,c)]}case"Where":{const s=ve(R("condition",e,t,n),"bool"),i=[await Ib(s)];return s.dispose(),i}case"ListDiff":return qT(R("x",e,t,n),R("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}},KQ="dynamic";const cY=(e,t,n)=>{switch(e.op){case"TopKV2":{const s=R("x",e,t,n),i=R("k",e,t,n),o=R("sorted",e,t,n),a=Lb(s,i,o);return[a.values,a.indices]}case"Unique":{const s=R("x",e,t,n),i=qd(s);return[i.values,i.indices]}case"UniqueV2":{const s=R("x",e,t,n),i=R("axis",e,t,n),o=qd(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},XQ="evaluation";const lY=(e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=R("default",e,t,n);return[Xn(e.name,t,n)||s];case"Placeholder":return[Xn(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{const d=R("x",e,t,n);return[ir(d)]}case"IdentityN":return R("x",e,t,n).map(d=>ir(d));case"Snapshot":const i=R("x",e,t,n);return[ir(i)];case"Shape":return[ns(R("x",e,t,n).shape,"int32")];case"ShapeN":return R("x",e,t,n).map(d=>ns(d.shape));case"Size":return[Ne(R("x",e,t,n).size,"int32")];case"Rank":return[Ne(R("x",e,t,n).rank,"int32")];case"NoOp":return[Ne(1)];case"Print":const o=R("x",e,t,n),a=R("data",e,t,n),c=R("message",e,t,n),h=R("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(c);for(let d=0;d{switch(e.op){case"ResizeBilinear":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Wr.resizeBilinear(s,[i[0],i[1]],o)]}case"ResizeNearestNeighbor":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[Wr.resizeNearestNeighbor(s,[i[0],i[1]],o)]}case"CropAndResize":{const s=R("image",e,t,n),i=R("boxes",e,t,n),o=R("boxInd",e,t,n),a=R("cropSize",e,t,n),c=R("method",e,t,n),h=R("extrapolationValue",e,t,n);return[Wr.cropAndResize(s,i,o,a,c,h)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},ZQ="image";const uY=(e,t,n)=>{switch(e.op){case"Equal":return[Zs(R("a",e,t,n),R("b",e,t,n))];case"NotEqual":return[kr(R("a",e,t,n),R("b",e,t,n))];case"Greater":return[Ss(R("a",e,t,n),R("b",e,t,n))];case"GreaterEqual":return[Ki(R("a",e,t,n),R("b",e,t,n))];case"Less":return[Zl(R("a",e,t,n),R("b",e,t,n))];case"LessEqual":return[Dr(R("a",e,t,n),R("b",e,t,n))];case"LogicalAnd":return[Ws(R("a",e,t,n),R("b",e,t,n))];case"LogicalNot":return[Ql(R("a",e,t,n))];case"LogicalOr":return[Wd(R("a",e,t,n),R("b",e,t,n))];case"Select":case"SelectV2":return[_n(R("condition",e,t,n),R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node 
type ${e.op} is not implemented`)}},QQ="logical";const dY=(e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[at(R("a",e,t,n),R("b",e,t,n),R("transposeA",e,t,n),R("transposeB",e,t,n))];case"Transpose":return[Me(R("x",e,t,n),R("perm",e,t,n))];case"_FusedMatMul":const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=R("numArgs",e,t,n);if(o){if(a&&c!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&c!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[h,d]=R("args",e,t,n);return[ep({a:R("a",e,t,n),b:R("b",e,t,n),transposeA:R("transposeA",e,t,n),transposeB:R("transposeB",e,t,n),bias:h,activation:i,preluActivationWeights:d})];default:throw TypeError(`Node type ${e.op} is not implemented`)}},eee="matrices";const pY=(e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[bo(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"FusedBatchNormV3":return[bo(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"LRN":return[rb(R("x",e,t,n),R("radius",e,t,n),R("bias",e,t,n),R("alpha",e,t,n),R("beta",e,t,n))];case"Softmax":return[No(R("x",e,t,n))];case"LogSoftmax":return[_d(R("x",e,t,n))];case"SparseToDense":return[xb(R("sparseIndices",e,t,n),R("outputShape",e,t,n),R("sparseValues",e,t,n),R("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},tee="normalization";const mY=(e,t,n)=>{switch(e.op){case"Max":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[qn(R("x",e,t,n),s,i)]}case"Mean":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[zt(R("x",e,t,n),s,i)]}case"Min":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[ka(R("x",e,t,n),s,i)]}case"Sum":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Ue(R("x",e,t,n),s,i)]}case"All":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[xd(R("x",e,t,n),s,i)]}case"Any":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Pl(R("x",e,t,n),s,i)]}case"ArgMax":{const s=R("axis",e,t,n);return[zl(R("x",e,t,n),s)]}case"ArgMin":{const s=R("axis",e,t,n);return[My(R("x",e,t,n),s)]}case"Prod":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Bd(R("x",e,t,n),s,i)]}case"Cumsum":{const s=R("axis",e,t,n),i=R("exclusive",e,t,n),o=R("reverse",e,t,n);return[Od(R("x",e,t,n),s,i,o)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},nee="reduction";const fY=(e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=R("n",e,t,n),i=R("axis",e,t,n);let o=R("tensors",e,t,n);return o=o.slice(0,s),[Mt(o,i)]}case"GatherV2":case"Gather":{const s=R("axis",e,t,n),i=R("x",e,t,n),o=R("indices",e,t,n);return[Oa(i,ve(o,"int32"),s)]}case"ReverseV2":case"Reverse":{const s=R("axis",e,t,n),i=R("x",e,t,n);return[Is(i,s)]}case"Slice":{const s=R("begin",e,t,n),i=R("size",e,t,n);return[nt(R("x",e,t,n),s,i)]}case"StridedSlice":{const s=R("begin",e,t,n),i=R("end",e,t,n),o=R("strides",e,t,n),a=R("beginMask",e,t,n),c=R("endMask",e,t,n),h=R("ellipsisMask",e,t,n),d=R("newAxisMask",e,t,n),m=R("shrinkAxisMask",e,t,n),y=R("x",e,t,n);return[bb(y,s,i,o,a,c,h,d,m)]}case"Pack":return ee(()=>{const s=R("axis",e,t,n),i=R("tensors",e,t,n),o=i[0].shape,a=Fr(i[0]).shape,c=i.map(h=>{const d=ot(h.shape,o);if(!d&&!ot(Fr(h).shape,a))throw new Error("the input tensors shape does not match");return d?h:K(h,o)});return[is(c,s)]});case"Unpack":{const 
s=R("axis",e,t,n),i=R("tensor",e,t,n);return Oi(i,s)}case"Tile":{const s=R("reps",e,t,n);return[Er(R("x",e,t,n),s)]}case"Split":case"SplitV":{const s=R("axis",e,t,n),i=R("numOrSizeSplits",e,t,n),o=R("x",e,t,n);return ss(o,i,s)}case"ScatterNd":{const s=R("indices",e,t,n),i=R("values",e,t,n),o=R("shape",e,t,n);return[dA(s,i,o)]}case"GatherNd":{const s=R("x",e,t,n),i=R("indices",e,t,n);return[pA(s,i)]}case"SparseToDense":{const s=R("sparseIndices",e,t,n),i=R("outputShape",e,t,n),o=R("sparseValues",e,t,n),a=R("defaultValue",e,t,n);return[xb(s,o,i,o.dtype===a.dtype?a:ve(a,o.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},see="slice_join";const gY=(e,t,n)=>{switch(e.op){case"FFT":return[rh(R("x",e,t,n))];case"IFFT":return[Wa(R("x",e,t,n))];case"RFFT":return[oh(R("x",e,t,n))];case"IRFFT":return[Hd(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},iee="spectral";const yY=(e,t,n)=>{switch(e.op){case"Cast":return[ve(R("x",e,t,n),R("dtype",e,t,n))];case"ExpandDims":{const s=R("axis",e,t,n);return[Hn(R("x",e,t,n),s)]}case"Squeeze":{const s=R("axis",e,t,n);return[Fr(R("x",e,t,n),s)]}case"Reshape":return[K(R("x",e,t,n),R("shape",e,t,n))];case"PadV2":case"Pad":return[Ci(R("x",e,t,n),R("padding",e,t,n),R("constantValue",e,t,n))];case"SpaceToBatchND":{const s=R("blockShape",e,t,n),i=R("paddings",e,t,n);return[th(R("x",e,t,n),s,i)]}case"BatchToSpaceND":{const s=R("blockShape",e,t,n),i=R("crops",e,t,n);return[ql(R("x",e,t,n),s,i)]}case"DepthToSpace":{const s=R("blockSize",e,t,n),i=R("dataFormat",e,t,n).toUpperCase();return[Qy(R("x",e,t,n),s,i)]}case"BroadcastTo":return[jl(R("x",e,t,n),R("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},ree="transformation";function vN(e,t,n){const s=((i,o,a)=>{switch(i.category){case"arithmetic":return ee(()=>XV(i,o,a));case"basic_math":return ee(()=>JV(i,o,a));case"control":return iY(i,o,a);case"convolution":return ee(()=>rY(i,o,a));case"creation":return ee(()=>oY(i,o,a));case"dynamic":return aY(i,o,a);case"evaluation":return ee(()=>cY(i,o,a));case"image":return ee(()=>hY(i,o,a));case"graph":return ee(()=>lY(i,o,a));case"logical":return ee(()=>uY(i,o,a));case"matrices":return ee(()=>dY(i,o,a));case"normalization":return ee(()=>pY(i,o,a));case"reduction":return ee(()=>mY(i,o,a));case"slice_join":return ee(()=>fY(i,o,a));case"spectral":return ee(()=>gY(i,o,a));case"transformation":return ee(()=>yY(i,o,a));case"custom":const c=LN(i.op);if(c&&c.customExecutor)return c.customExecutor(new KV(i,o,a));throw TypeError(`Custom op ${i.op} is not registered.`);default:throw TypeError(`Unknown op '${i.op}'. 
File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return s instanceof Promise?s.then(i=>[].concat(i)):[].concat(s)}class NN{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;tt.id===0&&t.iterationId===0?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function CN(e,t,n,s){const i=new Set,o=[];let a=null,c=null;const h=new Set,d=Object.keys(e).map(b=>os(b)[0]);let m=[];s!=null&&(m=s.map(b=>os(b.name)[0]));const y=[...t];for(;y.length>0;){const b=y.pop();if((RN(b)||SY(b))&&(a==null&&(a=b,c=a.children.map(w=>w.name).filter(w=>i.has(w)))),i.add(b.name),n[b.name]!=null)continue;if(d.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){o.push(b.name);continue}b.inputs.forEach(w=>{if(h.has(w.name))return;h.add(w.name),y.push(w)})}return{inputs:e,outputs:t,usedNodes:i,missingInputs:o,dynamicNode:a,syncInputs:c}}function bY(e,t,n){const{usedNodes:s,inputs:i}=n,o=[],a=Object.keys(i).map(m=>os(m)[0]).map(m=>e.nodes[m]),c=e.initNodes;a.forEach(m=>{s.has(m.name)&&o.push(m)}),e.weights.forEach(m=>{s.has(m.name)&&o.push(m)}),c!=null&&c.forEach(m=>{s.has(m.name)&&o.push(m)});const h=new Set,d=[];for(;o.length>0;){const m=o.pop();h.add(m.name),t[m.name]||d.push(m),m.children.forEach(y=>{!h.has(y.name)&&s.has(y.name)&&y.inputs.every(b=>h.has(b.name))&&o.push(y)})}return d}const wY=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],LY=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"];function RN(e){return wY.indexOf(e.op)>=0}function SY(e){return LY.indexOf(e.op)>=0}class _L{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new 
Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,e.functions!=null&&Object.keys(e.functions).forEach(n=>{this._functionExecutorMap[n]=new _L(e.functions[n],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map(n=>e[n].map(s=>s.id));this._weightIds=[].concat(...t),this._weightMap=e}get inputs(){return this._inputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(e=>e.signatureKey||e.name)}get outputNodes(){return this._outputs.map(e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t})}get functions(){return Object.keys(this._functions).reduce((e,t)=>(e[t]=this._functions[t].signature,e),{})}getCompilationKey(e,t){const n=e.map(i=>i.name).sort(),s=t.map(i=>i.name).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=CN(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:i,syncInputs:o}=n;if(i!=null)throw new Error(`This execution contains the node '${i.name}', which has the dynamic op '${i.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${o}]`);if(s.length>0){const a=t.map(h=>h.name),c=Object.keys(e);throw new Error(`Cannot compute the outputs [${a}] from the provided inputs [${c}]. 
Missing the following inputs: [${s}]`)}return bY(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map(m=>this.graph.nodes[os(m)[0]]),i=t.map(m=>os(m)[0]);let o=i.map(m=>this.graph.nodes[m]);o.length===0&&(o=this._outputs);const a=this.getCompilationKey(s,o);let c=this.compiledMap.get(a);c==null&&(c=this.compile(e,o),this.compiledMap.set(a,c));const h={},d={};return ee(()=>{const m=new NN(this.weightMap,h,d,this.functionExecutorMap),y=Object.assign({},this.weightMap);Object.keys(e).forEach(L=>{const[T,A]=os(L),N=[];N[A]=e[L],y[T]=N});const b=this.getFrozenTensorIds(y),w={};for(let L=0;LXn(L,y,m))})}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map(n=>e[n]).map(n=>n.map(s=>s.id)));return new Set(t)}checkTensorForDisposal(e,t,n,s,i,o,a){if(t.category==="control"||o.indexOf(e)!==-1)return;n[e].forEach(c=>{c!=null&&(a[c.id]=(a[c.id]||0)+t.children.length)}),t.inputs.forEach(c=>{if(c.category!=="control"){const h=fV(c.name,n,s);h!=null&&h.forEach(d=>{if(d&&!i.has(d.id)){const m=a[d.id];m===1?(d.dispose(),delete a[d.id]):m!=null&&a[d.id]--}})}})}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},i={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const o=new NN(this.weightMap,s,i,this.functionExecutorMap),a=await this.executeWithControlFlow(e,o,t,n),c=t.map(y=>Xn(y,a,o)),h=c.map(y=>y.id),d=Object.keys(e).map(y=>e[y].id),m=new Set([...h,...d,...this.weightIds]);return Object.keys(a).forEach(y=>{const b=a[y];b.forEach(w=>{w&&!w.isDisposed&&!m.has(w.id)&&w.dispose()})}),this.parent==null&&o.dispose(m),c}async executeFunctionAsync(e,t,n){const s=e.reduce((i,o,a)=>(i[this.inputs[a].name]=o,i),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const i=Object.keys(e),o=i.map(E=>this.graph.nodes[os(E)[0]]),a=n.map(E=>os(E)[0]),c=a.map(E=>this.graph.nodes[E]),{usedNodes:h,missingInputs:d,dynamicNode:m,syncInputs:y}=CN(e,c,this.weightMap),b=[...o,...this.graph.weights].map(E=>({node:E,contexts:t.currentContext})),w=Object.assign({},this.weightMap);Object.keys(e).forEach(E=>{const[D,F]=os(E),_=[];_[F]=e[E],w[D]=_});const L={},T=this.getFrozenTensorIds(w),A={};for(;b.length>0;){const E=this.processStack(o,b,t,w,A,T,a,L,h);await Promise.all(E)}m==null&&!s&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");const N=c.filter(E=>!RN(E)&&!Xn(E.name,w,t)).map(E=>E.name);if(N.length>0){let E="";throw m!=null&&(E=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${y}]`),new Error(`Cannot compute the outputs [${N}] from the provided inputs [${i}]. Consider providing the following inputs: [${d}]. 
${E}`)}return w}processStack(e,t,n,s,i,o,a,c,h){const d=[];for(;t.length>0;){const m=t.pop();n.currentContext=m.contexts;let y="";if(m.node.op==="Enter"&&R("isConstant",m.node,s,n)&&([y]=sr(m.node.name,n)),e.indexOf(m.node)===-1){const b=vN(m.node,s,n);y||([y]=sr(m.node.name,n));const w=n.currentContext;b instanceof Promise?d.push(b.then(L=>(s[y]=L,n.currentContext=w,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h),L))):(s[y]=b,this.checkTensorForDisposal(y,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h))}else this.processChildNodes(m.node,t,n,s,i,h)}return d}processChildNodes(e,t,n,s,i,o){e.children.forEach(a=>{const[c]=sr(a.name,n);if(i[c]||!o.has(a.name))return;a.op==="Merge"?a.inputNames.some(h=>!!Xn(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a})):a.inputNames.every(h=>!!Xn(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a}))})}dispose(){Object.keys(this.weightMap).forEach(e=>this.weightMap[e].forEach(t=>t.dispose()))}checkInputShapeAndType(e){Object.keys(e).forEach(t=>{const n=e[t],[s]=os(t),i=this.graph.nodes[s];if(i.attrParams.shape&&i.attrParams.shape.value){const o=i.attrParams.shape.value,a=o.length===n.shape.length&&n.shape.every((c,h)=>o[h]===-1||o[h]===c);k(a,()=>`The shape of dict['${i.name}'] provided in model.execute(dict) must be [${o}], but was [${n.shape}]`)}i.attrParams.dtype&&i.attrParams.dtype.value&&k(n.dtype===i.attrParams.dtype.value,()=>`The dtype of dict['${i.name}'] provided in model.execute(dict) must be ${i.attrParams.dtype.value}, but was ${n.dtype}`)})}mapInputs(e){const t={};for(const n in e)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[n]!=null){const s=this._signature.inputs[n];t[s.name]=e[n]}else t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter(n=>{const[s]=os(n);return this.graph.nodes[s]==null});if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map(t=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[t]!=null){const n=this._signature.outputs[t];return n.name}return t},{})}checkOutputs(e){e.forEach(t=>{const[n]=os(t);if(!this.graph.nodes[n])throw new Error(`The output '${t}' is not found in the graph`)})}}const IY="?tfjs-format=file",xY="model.json";class ON{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",t==null&&(this.loadOptions={})}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){const e=this.modelUrl;if(e.load!=null)this.handler=e;else if(this.loadOptions.requestInit!=null)this.handler=fd(e,this.loadOptions);else{const t=Ly(e,this.loadOptions);if(t.length===0)t.push(fd(e,this.loadOptions));else if(t.length>1)throw new Error(`Found more than one (${t.length}) load handlers for URL '${[e]}'`);this.handler=t[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let 
n={};this.artifacts.userDefinedMetadata!=null&&(n=this.artifacts.userDefinedMetadata.signature),this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=ud(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new _L(SN.Instance.transformGraph(t,n)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),e.modelInitializer!=null){const i=SN.Instance.transformGraph(e.modelInitializer);this.initializer=new _L(i),this.initializer.weightMap=this.executor.weightMap,this.initializer.execute({},[])}return!0}async save(e,t){if(typeof e=="string"){const n=wy(e);if(n.length===0)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(n.length>1)throw new Error(`Found more than one (${n.length}) save handlers for URL '${e}'`);e=n[0]}if(e.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof Q)&&!Array.isArray(e))return e;if(e=Array.isArray(e)?e:[e],e.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce((t,n,s)=>(t[n]=e[s],t),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce((t,n)=>(t[n]=[e[n]],t),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose()}}async function TY(e,t={}){if(e==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(e.load==null&&(e.endsWith("/")||(e=e+"/"),e=`${e}${xY}${IY}`));const n=new ON(e,t);return await n.load(),n}const EN="2.6.0";function AY(e,t){return Yp(e,t)}function Yp(e,t,n=new Map,s=new Set){if(e==null)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(i.recurse)if(Ja(e)){const o=Array.isArray(e)?[]:{};s.add(e);for(const a in e){const c=e[a],h=Yp(c,t,n,s);o[a]=h}return s.delete(e),o}else throw new Error(`Can't recurse into non-iterable type: ${e}`);else return n.set(e,i.value),i.value}function vY(e,t=kN){return DN(e,t)}function DN(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(i.recurse)if(Ja(s)){const o=Array.isArray(s)?[]:{};n.add(s);for(const a in s){const c=e.map(d=>d[a]),h=DN(c,t,n);o[a]=h}return n.delete(s),o}else throw new Error(`Can't recurse into non-iterable type: ${s}`);else return i.value}function kN(e){return e===null?null:Ja(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}async function FN(e,t){const n=new Map;Yp(e,t,n);for(const i of Array.from(n.keys())){const o=n.get(i);if(o instanceof Promise){const a=await o;n.set(i,a)}}const s=Yp(e,t,n);return s}function Ja(e){return e!=null&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||typeof e=="object"&&!(e instanceof Q))}function NY(e){return e==null||CY(e)||Array.isArray(e)||typeof e=="object"&&e instanceof Q||wn(e)}function CY(e){return e===null||typeof e!="object"&&typeof e!="function"}function RY(e){return AY(e,OY)}function OY(e){return e instanceof Q?{value:e.clone(),recurse:!1}:Ja(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class _N{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,e==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class WL extends 
_N{constructor(){super(WL.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=this.capacity*2,t=new Array(e),n=this.length();for(let s=0;s({value:t++,done:!1}))}function zh(e){return new kY(e)}function $N(e,t){return new BN(e,t)}function aee(e,t,n){return $N(zh(e).take(t),n)}function EY(e,t=Yr.FAIL){return new zY(e,t)}class fn{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new MY(this,e)}filter(e){return new UY(this,e)}map(e){return new BY(this,e)}mapAsync(e){return new UN(this,e)}serialMapAsync(e){return new UN(this,e).serial()}flatmap(e){return new PY(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile(t=>t===!0)}rowMajorBatch(e,t=!0){return new $Y(this,e,t)}columnMajorBatch(e,t=!0,n=kN){const s=this.rowMajorBatch(e,t);return s.map(i=>vY(i,n))}concatenate(e,t){return new BN(WN([this,e]),t)}take(e){return e<0||e==null?this:new WY(this,e)}skip(e){return e<0||e==null?this:new _Y(this,e)}prefetch(e){return new MN(this,e)}shuffle(e,t){return new GY(this,e,t)}serial(){return new FY(this)}}class DY extends fn{constructor(e){super();this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:RY(e),done:!1}}}class kY extends fn{constructor(e){super();this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class FY extends fn{constructor(e){super();this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class _Y extends fn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class $Y extends fn{constructor(e,t,n=!0){super();this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){const e=[];for(;e.length0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class UY extends fn{constructor(e,t){super();this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){const 
e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;He(e.value)}}}class BY extends fn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);for(const i of t)ld(i,s)||i.dispose();return{value:n,done:!1}}}class MY extends fn{constructor(e,t){super();this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class UN extends fn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=await this.transform(e.value),s=Hi(n);for(const i of t)ld(i,s)||i.dispose();return{value:n,done:!1}}}class $L extends fn{constructor(){super();this.outputQueue=new WL,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class PY extends $L{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);this.outputQueue.pushAll(n);for(const i of t)ld(i,s)||i.dispose();return!0}}class BN extends fn{constructor(e,t){super();this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){const e="TODO: fill in upstream of chained summaries";return`${e} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,this.iterator==null){const n=await this.moreIterators.next();if(n.done)return{value:null,done:!0};this.iterator=n.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Yr;(function(e){e[e.FAIL=0]="FAIL",e[e.SHORTEST=1]="SHORTEST",e[e.LONGEST=2]="LONGEST"})(Yr||(Yr={}));class zY extends fn{constructor(e,t=Yr.FAIL){super();this.iterators=e,this.mismatchMode=t,this.count=0,this.currentPromise=null}summary(){const e="TODO: fill in upstream of zip summaries";return`{${e}} -> Zip`}async nextState(e){await e;let t=0,n=0;function s(o){if(o instanceof fn){const a=o.next();return{value:a.then(c=>(t++,c.done&&n++,c.value)),recurse:!1}}else return{value:null,recurse:!0}}const i=await FN(this.iterators,s);if(t===n)return{value:null,done:!0};if(n>0)switch(this.mismatchMode){case Yr.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case Yr.SHORTEST:return{value:null,done:!0};case Yr.LONGEST:default:}return this.count++,{value:i,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class MN extends fn{constructor(e,t){super();this.upstream=e,this.bufferSize=t,this.buffer=new _N(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class GY extends MN{constructor(e,t,n){super(e,t);this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=_a(n||Vn().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(t.done)this.upstreamExhausted=!0;else return this.refill(),t}return{value:null,done:!0}}}class Za{constructor(){this.size=null}batch(e,t=!0){const n=this;k(e>0,()=>`batchSize needs to be positive, but it is - ${e}`);let s;return this.size===Infinity||this.size==null?s=this.size:t?s=Math.ceil(this.size/e):s=Math.floor(this.size/e),as(async()=>(await n.iterator()).columnMajorBatch(e,t,HY),s)}concatenate(e){const t=this;let n;return this.size===Infinity||e.size===Infinity?n=Infinity:this.size!=null&&e.size!=null?n=this.size+e.size:n=null,as(async()=>(await t.iterator()).concatenate(await e.iterator()),n)}filter(e){const t=this;let n;return this.size===Infinity?n=Infinity:n=null,as(async()=>(await t.iterator()).filter(s=>ee(()=>e(s))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return as(async()=>(await t.iterator()).map(n=>ee(()=>e(n))),this.size)}mapAsync(e){const t=this;return as(async()=>(await t.iterator()).mapAsync(e),this.size)}prefetch(e){if(e==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return as(async()=>(await t.iterator()).prefetch(e),this.size)}repeat(e){const t=this;let n;return this.size!=null&&e>0?n=this.size*e:e===0?n=0:this.size!=null&&(e===void 0||e<0)?n=Infinity:n=null,as(async()=>{const s=zh(async()=>({value:await t.iterator(),done:!1}));return $N(s.take(e))},n)}skip(e){const t=this;let n;return this.size!=null&&e>=0&&this.size>=e?n=this.size-e:this.size!=null&&(this.size(await t.iterator()).skip(e),n)}shuffle(e,t,n=!0){if(e==null||e<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,i=_a(t||Vn().toString());return as(async()=>{let o=i.int32();return n&&(o+=i.int32()),(await s.iterator()).shuffle(e,o.toString())},this.size)}take(e){const t=this;let n;return this.size!=null&&this.size>e?n=e:this.size!=null&&this.size<=e?n=this.size:n=null,as(async()=>(await t.iterator()).take(e),n)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}Za.MAX_BUFFER_SIZE=1e4;function as(e,t=null){return new class extends Za{constructor(){super(...arguments);this.size=t}async iterator(){return e()}}}function VY(e){return as(async()=>WN(e),e.length)}function YY(e){if(!Ja(e))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(e))for(let n=0;n{const n=await FN(e,s=>{if(s instanceof Za)return{value:s.iterator(),recurse:!1};if(Ja(s))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return EY(n,Yr.SHORTEST)},t)}function HY(e){if(e===null)return null;const t=e[0];if(NY(t)){const n=qY(e);return{value:n,recurse:!1}}return{value:null,recurse:!0}}function qY(e){if(e.length===0)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof Q?is(e):en(e)}class PN extends Za{constructor(e){super();this.input=e}async iterator(){const e=await this.input.iterator(),t=e.decodeUTF8(),n=t.split(` -`).map(s=>(s.endsWith("\r")&&(s=s.slice(0,-1)),s));return n}}const Hp='"',Gh=Symbol("out"),zN=Symbol("field"),qp=Symbol("quote"),UL=Symbol("quoteafterquote"),GN=Symbol("quoteinquote");class VN extends Za{constructor(e,t){super();this.input=e,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new PN(e),t||(t={}),this.hasHeader=!(t.hasHeader===!1),this.fullColumnNames=t.columnNames,this.columnConfigs=t.columnConfigs,this.configuredColumnsOnly=t.configuredColumnsOnly,t.delimWhitespace?(k(t.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=t.delimiter?t.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){const e=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!e)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&e&&k(e.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+e.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=e);const t=this.fullColumnNames.reduce((s,i)=>(s[i]=s[i]+1||1,s),{}),n=Object.keys(t).filter(s=>t[s]>1);if(k(n.length===0,()=>"Duplicate column names found: "+n.toString()),this.columnConfigs)for(const s of Object.keys(this.columnConfigs)){const i=this.fullColumnNames.indexOf(s);if(i===-1)throw new Error('The key "'+s+'" provided in columnConfigs does not match any of the 
column names ('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){const e=await this.base.iterator(),t=await e.next();if(t.done)throw new Error("No data was found for CSV parsing.");const n=t.value,s=this.parseRow(n,!1);return s}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let e=await this.base.iterator();return this.hasHeader&&(e=e.skip(1)),e.map(t=>this.makeDataElement(t))}makeDataElement(e){const t=this.parseRow(e),n={},s={};for(let i=0;i14||!Number.isInteger(t))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=e.numFramesPerSpectrogram||43,this.sampleRateHz=e.sampleRateHz,this.columnTruncateLength=e.columnTruncateLength||this.fftSize,this.audioTrackConstraints=e.audioTrackConstraints,this.smoothingTimeConstant=e.smoothingTimeConstant||0,this.includeSpectrogram=!(e.includeSpectrogram===!1),this.includeWaveform=e.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(e={}){if(C().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");const t=new YN(e);return await t.start(),t}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(n){throw new Error(`Error thrown while initializing video stream: ${n.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");const e=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new e,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);const t=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,t.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let e,t;const n=await this.getAudioData();if(this.includeSpectrogram){const s=this.flattenQueue(n.freqDataQueue);e=this.getTensorFromAudioDataArray(s,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){const s=this.flattenQueue(n.timeDataQueue);t=this.getTensorFromAudioDataArray(s,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:e,waveform:t},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){const e=[],t=[];let n=0;return new Promise(s=>{const 
i=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&s({freqDataQueue:e,timeDataQueue:t}),e.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),t.push(this.timeData.slice())),++n===this.numFrames&&(clearInterval(i),s({freqDataQueue:e,timeDataQueue:t}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(e){const t=e[0].length,n=new Float32Array(e.length*t);return e.forEach((s,i)=>n.set(s,i*t)),n}getTensorFromAudioDataArray(e,t){const n=new Float32Array(we(t));return n.set(e,n.length-e.length),en(n,t)}}class HN extends fn{constructor(e,t){super();if(this.webcamVideoElement=e,this.webcamConfig=t,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=ns([0],"int32"),this.webcamConfig.centerCrop){const n=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,s=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,i=(1-n)/2,o=(1-s)/2,a=i+n,c=s+o;this.cropBox=_r([o,i,c,a],[1,4])}else this.cropBox=_r([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(e,t={}){if(C().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!e){if(e=document.createElement("video"),!t.resizeWidth||!t.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");e.width=t.resizeWidth,e.height=t.resizeHeight}const n=new HN(e,t);return await n.start(),n}async start(){this.webcamConfig.facingMode&&k(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. 
Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(e){throw e.message=`Error thrown while initializing video stream: ${e.message}`,e}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(e){console.log(e),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(e=>{this.webcamVideoElement.onloadedmetadata=()=>{e()}})}async next(){if(this.isClosed)return{value:null,done:!0};let e;try{e=uT(this.webcamVideoElement)}catch(t){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(t)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(e),done:!1}}catch(t){throw new Error(`Error thrown cropping the video: ${t.message}`)}finally{e.dispose()}else return{value:e,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(e){return ee(()=>{const t=e.toFloat().expandDims(0);let n;n=Wr.cropAndResize(t,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");const s=n.shape;return n.reshape(s.slice(1))})}async capture(){return(await this.next()).value}stop(){const e=this.stream.getTracks();e.forEach(t=>t.stop());try{this.webcamVideoElement.srcObject=null}catch(t){console.log(t),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class qN{}class jN extends fn{split(e){return new jY(this,e)}}class jY extends jN{constructor(e,t){super();this.upstream=e,this.impl=new KY(e,t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class KY extends $L{constructor(e,t){super();this.upstream=e,this.separator=t,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){const e=await this.upstream.next();if(e.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);const t=e.value.split(this.separator);t[0]=this.carryover+t[0];for(const n of t.slice(0,-1))this.outputQueue.push(n);return this.carryover=t[t.length-1],!0}}class XY extends fn{decodeUTF8(){return new JY(this)}}class JY extends jN{constructor(e){super();this.upstream=e,this.impl=new ZY(e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class ZY extends $L{constructor(e){super();if(this.upstream=e,C().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{const{StringDecoder:t}=require("string_decoder");this.decoder=new t("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){const e=await this.upstream.next();let t;if(e.done)return!1;t=e.value;let n;return C().get("IS_BROWSER")?n=this.decoder.decode(t,{stream:!0}):n=this.decoder.write(Buffer.from(t.buffer)),this.outputQueue.push(n),!0}}class KN extends XY{constructor(e,t={}){super();this.file=e,this.options=t,k(e instanceof Uint8Array||(C().get("IS_BROWSER")?e instanceof File||e instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=t.offset||0,this.chunkSize=t.chunkSize||1024*1024}summary(){return`FileChunks 
${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};const e=new Promise((t,n)=>{const s=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)t(new Uint8Array(this.file.slice(this.offset,s)));else{const i=new FileReader;i.onload=a=>{let c=i.result;if(c instanceof ArrayBuffer&&(c=new Uint8Array(c)),!(c instanceof Uint8Array))return n(new TypeError("FileReader returned unknown type."));t(c)},i.onabort=a=>n(new Error("Aborted")),i.onerror=a=>n(new Error(a.type));const o=this.file.slice(this.offset,s);i.readAsArrayBuffer(o)}this.offset=s});return{value:await e,done:!1}}}async function QY(e,t={}){let n,s;typeof e=="string"?n=e:(n=e.url,s=eH(e));const i=await $x(n,s);if(i.ok){const o=new Uint8Array(await i.arrayBuffer());return new KN(o,t)}else throw new Error(i.statusText)}const eH=e=>{const t={method:e.method,headers:e.headers,body:e.body,mode:e.mode,credentials:e.credentials,cache:e.cache,redirect:e.redirect,referrer:e.referrer,integrity:e.integrity};return t};function XN(e){return typeof e=="string"&&e.substr(0,7)==="file://"}class JN extends qN{constructor(e,t={}){super();this.input=e,this.options=t}async iterator(){if(XN(this.input)&&C().get("IS_NODE")){const e=require("fs");this.input=e.readFileSync(this.input.substr(7))}return new KN(this.input,this.options)}}class ZN extends qN{constructor(e,t={}){super();this.url=e,this.fileOptions=t}async iterator(){return XN(this.url)?new JN(this.url,this.fileOptions).iterator():QY(this.url,this.fileOptions)}}function tH(e,t={}){return new VN(new ZN(e),t)}function nH(e){const t=zh(e);return as(async()=>t)}function sH(e){return as(async()=>{const t=await e();return zh(()=>t.next())})}async function iH(e,t){return HN.create(e,t)}async function rH(e){return YN.create(e)}const QN="2.6.0";var oH=Object.freeze({__proto__:null,array:VY,Dataset:Za,zip:YY,CSVDataset:VN,TextLineDataset:PN,csv:tH,func:nH,generator:sH,microphone:rH,webcam:iH,FileDataSource:JN,URLDataSource:ZN,version_data:QN});function xe(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}const aH=tp,cH=Mb,lH=Pb,hH=zb,uH=jd;function BL(e,t,n,s){if(n==="linear")return e.linear(t);if(n==="relu")return e.relu(t);if(n==="elu")return So(t);if(n==="relu6")return e.relu6(t);if(n==="prelu")return e.prelu(t,s);throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}class dH extends g{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new p(this,Fs())}write(e,t,n){this.firstUse&&(this.firstUse=!1,C().get("IS_NODE")&&Pa(` + ${s}, and tensor's shape is: ${e.shape}`);const o=s===0?0:e.size/s,a=Q(()=>{const h=[];e=K(e,[1,s,o]);for(let d=0;d{switch(e.op){case"If":case"StatelessIf":{const s=R("thenBranch",e,t,n),i=R("elseBranch",e,t,n),o=R("cond",e,t,n),a=R("args",e,t,n),c=await o.data();return c[0]?n.functionMap[s].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap):n.functionMap[i].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=R("body",e,t,n),i=R("cond",e,t,n),o=R("args",e,t,n),a=await n.functionMap[i].executeFunctionAsync(o,n.tensorArrayMap,n.tensorListMap),c=o.map(m=>m.id);let h=await a[0].data();a.forEach(m=>{!m.kept&&c.indexOf(m.id)===-1&&m.dispose()});let d=o;for(;h[0];){const m=d;d=await n.functionMap[s].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);const 
f=d.map(w=>w.id);m.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&f.indexOf(w.id)===-1&&w.dispose()});const b=await n.functionMap[i].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);h=await b[0].data(),b.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&f.indexOf(w.id)===-1&&w.dispose()})}return d}case"LoopCond":{const s=R("pred",e,t,n);return[ar(s)]}case"Switch":{const s=R("pred",e,t,n);let i=R("data",e,t,n);return i.kept||(i=ar(i)),(await s.data())[0]?[void 0,i]:[i,void 0]}case"Merge":{const s=e.inputNames.find(i=>ss(i,t,n)!==void 0);if(s){const i=ss(s,t,n);return[ar(i)]}return}case"Enter":{const s=R("frameName",e,t,n),i=R("tensor",e,t,n);return n.enterFrame(s),[ar(i)]}case"Exit":{const s=R("tensor",e,t,n);return n.exitFrame(),[ar(s)]}case"NextIteration":{const s=R("tensor",e,t,n);return n.nextIteration(),[ar(s)]}case"TensorArrayV3":{const s=R("size",e,t,n),i=R("dtype",e,t,n),o=R("elementShape",e,t,n),a=R("dynamicSize",e,t,n),c=R("clearAfterRead",e,t,n),h=R("identicalElementShapes",e,t,n),d=R("name",e,t,n),m=new $Y(d,i,s,o,h,a,c);return n.addTensorArray(m),[m.idTensor,Ce(1)]}case"TensorArrayWriteV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.write(i,o),[a.idTensor]}case"TensorArrayReadV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=n.getTensorArray(s.id);return[o.read(i)]}case"TensorArrayGatherV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("dtype",e,t,n),a=n.getTensorArray(s.id);return[a.gather(i,o)]}case"TensorArrayScatterV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.scatter(i,o),[a.idTensor]}case"TensorArrayConcatV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id),o=R("dtype",e,t,n);return[i.concat(o)]}case"TensorArraySplitV3":{const s=R("tensorArrayId",e,t,n),i=R("tensor",e,t,n),o=R("lengths",e,t,n),a=n.getTensorArray(s.id);return a.split(o,i),[a.idTensor]}case"TensorArraySizeV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return[Ce(i.size(),"int32")]}case"TensorArrayCloseV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return i.clearAndClose(),[i.idTensor]}case"TensorListSetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorList(s.id);return a.setItem(i,o),[a.idTensor]}case"TensorListGetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.getItem(i,o,a)]}case"TensorListScatterV2":case"TensorListScatter":{const s=R("indices",e,t,n),i=R("tensor",e,t,n),o=R("elementShape",e,t,n),a=R("numElements",e,t,n),c=MY(i,s,o,a);return n.addTensorList(c),[c.idTensor]}case"TensorListReserve":{const s=R("elementShape",e,t,n),i=R("elementDType",e,t,n),o=R("numElements",e,t,n),a=BY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListGather":{const s=R("tensorListId",e,t,n),i=R("indices",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.gather(i,a,o)]}case"TensorListStack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=R("numElements",e,t,n),c=n.getTensorList(s.id);return[c.stack(i,o,a)]}case"TensorListFromTensor":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=UY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListConcat":{const 
s=R("tensorListId",e,t,n),i=n.getTensorList(s.id),o=R("dtype",e,t,n),a=R("elementShape",e,t,n);return[i.concat(o,a)]}case"TensorListPushBack":{const s=R("tensorListId",e,t,n),i=R("tensor",e,t,n),o=n.getTensorList(s.id);return o.pushBack(i),[o.idTensor]}case"TensorListPopBack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=n.getTensorList(s.id);return[a.popBack(i,o)]}case"TensorListSplit":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("lengths",e,t,n),a=PY(s,o,i);return n.addTensorList(a),[a.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ete="control";function VN(e,t,n){const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=s==="fusedbatchnorm",h=R("numArgs",e,t,n);if(o){if(a&&h!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&h!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(c)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const d=R("strides",e,t,n),m=Sm(e,t,n),f=R("dataFormat",e,t,n).toUpperCase(),b=R("dilations",e,t,n),[w,L]=R("args",e,t,n);return{stride:d,pad:m,dataFormat:f,dilations:b,biasArg:w,preluArg:L,activationFunc:i}}const VY=(e,t,n)=>{switch(e.op){case"Conv1D":{const s=R("stride",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilation",e,t,n);return[ip(R("x",e,t,n),R("filter",e,t,n),s,i,o,a)]}case"Conv2D":{const s=R("strides",e,t,n),i=Sm(e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[Ji(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,o,[a[1],a[2]])]}case"_FusedConv2D":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=VN(e,t,n);return[Kb({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=VN(e,t,n);return[$A({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=R("outputShape",e,t,n),i=R("strides",e,t,n),o=Sm(e,t,n);return[rp(R("x",e,t,n),R("filter",e,t,n),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const s=R("strides",e,t,n),i=Sm(e,t,n),o=R("dilations",e,t,n),a=R("dataFormat",e,t,n).toUpperCase();return[Co(R("input",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,a,[o[1],o[2]])]}case"Conv3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[Lb(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2],s[3]],i,o,[a[1],a[2],a[3]])]}case"AvgPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[ah(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[fh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n),a=R("includeBatchInIndex",e,t,n),{result:c,indexes:h}=lA(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i,a);return[c,h]}case"AvgPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[yb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{const 
s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[Ob(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dilations",e,t,n),a=s[1],c=s[2],h=o[1],d=o[2];return[Ib(R("x",e,t,n),R("filter",e,t,n),[a,c],i,[h,d],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Dte="convolution";const GY=(e,t,n)=>{switch(e.op){case"Fill":{const s=R("shape",e,t,n),i=R("dtype",e,t,n),o=R("value",e,t,n);return[Ba(s,o,i)]}case"LinSpace":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("num",e,t,n);return[oA(s,i,o)]}case"Multinomial":{const s=R("logits",e,t,n),i=R("numSamples",e,t,n),o=R("seed",e,t,n);return[hA(s,i,o)]}case"OneHot":{const s=R("indices",e,t,n),i=R("depth",e,t,n),o=R("onValue",e,t,n),a=R("offValue",e,t,n);return[To(s,i,o,a)]}case"Ones":return[Js(R("shape",e,t,n),R("dtype",e,t,n))];case"OnesLike":return[Fn(R("x",e,t,n))];case"RandomUniform":return[ko(R("shape",e,t,n),R("minval",e,t,n),R("maxval",e,t,n),R("dtype",e,t,n))];case"Range":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("step",e,t,n);return[bh(s,i,o,R("dtype",e,t,n))]}case"TruncatedNormal":{const s=R("shape",e,t,n),i=R("mean",e,t,n),o=R("stdDev",e,t,n),a=R("seed",e,t,n);return[xh(s,i,o,R("dtype",e,t,n),a)]}case"Zeros":return[dt(R("shape",e,t,n),R("dtype",e,t,n))];case"ZerosLike":return[et(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},kte="creation";function sS(e,t,n){const s=R("boxes",e,t,n),i=R("scores",e,t,n),o=R("maxOutputSize",e,t,n),a=R("iouThreshold",e,t,n),c=R("scoreThreshold",e,t,n),h=R("softNmsSigma",e,t,n);return{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}}const YY=async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=sS(e,t,n),d=await zr.nonMaxSuppressionWithScoreAsync(s,i,o,a,c,h);return[d.selectedIndices,d.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=sS(e,t,n),h=R("padToMaxOutputSize",e,t,n),d=await zr.nonMaxSuppressionPaddedAsync(s,i,o,a,c,h);return[d.selectedIndices,d.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=sS(e,t,n);return[await zr.nonMaxSuppressionAsync(s,i,o,a,c)]}case"Where":{const s=Ae(R("condition",e,t,n),"bool"),i=[await Yb(s)];return s.dispose(),i}case"ListDiff":return dA(R("x",e,t,n),R("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}},Fte="dynamic";const HY=(e,t,n)=>{switch(e.op){case"TopKV2":{const s=R("x",e,t,n),i=R("k",e,t,n),o=R("sorted",e,t,n),a=Vb(s,i,o);return[a.values,a.indices]}case"Unique":{const s=R("x",e,t,n),i=Tp(s);return[i.values,i.indices]}case"UniqueV2":{const s=R("x",e,t,n),i=R("axis",e,t,n),o=Tp(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},_te="evaluation";const qY=(e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=R("default",e,t,n);return[ss(e.name,t,n)||s];case"Placeholder":return[ss(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{const d=R("x",e,t,n);return[ar(d)]}case"IdentityN":return R("x",e,t,n).map(d=>ar(d));case"Snapshot":const i=R("x",e,t,n);return[ar(i)];case"Shape":return[ls(R("x",e,t,n).shape,"int32")];case"ShapeN":return 
R("x",e,t,n).map(d=>ls(d.shape));case"Size":return[Ce(R("x",e,t,n).size,"int32")];case"Rank":return[Ce(R("x",e,t,n).rank,"int32")];case"NoOp":return[Ce(1)];case"Print":const o=R("x",e,t,n),a=R("data",e,t,n),c=R("message",e,t,n),h=R("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(c);for(let d=0;de.dispose()),this.tensorMap.clear(),this.handle.dispose()}size(){return this.tensorMap.size}async import(e,t){this.checkKeyAndValueTensor(e,t);const n=await e.data();return this.tensorMap.forEach(s=>s.dispose()),this.tensorMap.clear(),Q(()=>{const s=Qs(t),i=n.length,o=s.length;A(i===o,()=>`The number of elements doesn't match, keys has ${i} elements, the values has ${o} elements.`);for(let a=0;a{const s=[];for(let i=0;i{switch(e.op){case"HashTable":case"HashTableV2":{const i=R("keyDType",e,t,n),o=R("valueDType",e,t,n),a=new jY(i,o);return s.addHashTable(e.name,a),[a.handle]}case"LookupTableImport":case"LookupTableImportV2":{const i=R("tableHandle",e,t,n,s),o=R("keys",e,t,n),a=R("values",e,t,n),c=s.getHashTableById(i.id);return[await c.import(o,a)]}case"LookupTableFind":case"LookupTableFindV2":{const i=R("tableHandle",e,t,n,s),o=R("keys",e,t,n),a=R("defaultValue",e,t,n),c=s.getHashTableById(i.id);return[await c.find(o,a)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},$te="hash_table";const XY=(e,t,n)=>{switch(e.op){case"ResizeBilinear":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[zr.resizeBilinear(s,[i[0],i[1]],o)]}case"ResizeNearestNeighbor":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[zr.resizeNearestNeighbor(s,[i[0],i[1]],o)]}case"CropAndResize":{const s=R("image",e,t,n),i=R("boxes",e,t,n),o=R("boxInd",e,t,n),a=R("cropSize",e,t,n),c=R("method",e,t,n),h=R("extrapolationValue",e,t,n);return[zr.cropAndResize(s,i,o,a,c,h)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ute="image";const JY=(e,t,n)=>{switch(e.op){case"Equal":return[Xs(R("a",e,t,n),R("b",e,t,n))];case"NotEqual":return[Br(R("a",e,t,n),R("b",e,t,n))];case"Greater":return[xs(R("a",e,t,n),R("b",e,t,n))];case"GreaterEqual":return[Zi(R("a",e,t,n),R("b",e,t,n))];case"Less":return[ph(R("a",e,t,n),R("b",e,t,n))];case"LessEqual":return[Ur(R("a",e,t,n),R("b",e,t,n))];case"LogicalAnd":return[Us(R("a",e,t,n),R("b",e,t,n))];case"LogicalNot":return[mh(R("a",e,t,n))];case"LogicalOr":return[pp(R("a",e,t,n),R("b",e,t,n))];case"Select":case"SelectV2":return[Bn(R("condition",e,t,n),R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Bte="logical";const ZY=(e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[ct(R("a",e,t,n),R("b",e,t,n),R("transposeA",e,t,n),R("transposeB",e,t,n))];case"Transpose":return[Ye(R("x",e,t,n),R("perm",e,t,n))];case"_FusedMatMul":const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=R("numArgs",e,t,n);if(o){if(a&&c!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&c!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[h,d]=R("args",e,t,n);return[Ep({a:R("a",e,t,n),b:R("b",e,t,n),transposeA:R("transposeA",e,t,n),transposeB:R("transposeB",e,t,n),bias:h,activation:i,preluActivationWeights:d})];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Mte="matrices";const 
QY=(e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"FusedBatchNormV3":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"LRN":return[Nb(R("x",e,t,n),R("radius",e,t,n),R("bias",e,t,n),R("alpha",e,t,n),R("beta",e,t,n))];case"Softmax":return[Fo(R("x",e,t,n))];case"LogSoftmax":return[dp(R("x",e,t,n))];case"SparseToDense":return[Hb(R("sparseIndices",e,t,n),R("outputShape",e,t,n),R("sparseValues",e,t,n),R("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Pte="normalization";const eH=(e,t,n)=>{switch(e.op){case"Max":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Qn(R("x",e,t,n),s,i)]}case"Mean":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[qt(R("x",e,t,n),s,i)]}case"Min":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Va(R("x",e,t,n),s,i)]}case"Sum":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[$e(R("x",e,t,n),s,i)]}case"All":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Qd(R("x",e,t,n),s,i)]}case"Any":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[ih(R("x",e,t,n),s,i)]}case"ArgMax":{const s=R("axis",e,t,n);return[rh(R("x",e,t,n),s)]}case"ArgMin":{const s=R("axis",e,t,n);return[lb(R("x",e,t,n),s)]}case"Prod":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[gp(R("x",e,t,n),s,i)]}case"Cumsum":{const s=R("axis",e,t,n),i=R("exclusive",e,t,n),o=R("reverse",e,t,n);return[ap(R("x",e,t,n),s,i,o)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},zte="reduction";const tH=(e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=R("n",e,t,n),i=R("axis",e,t,n);let o=R("tensors",e,t,n);return o=o.slice(0,s),[Yt(o,i)]}case"GatherV2":case"Gather":{const s=R("axis",e,t,n),i=R("x",e,t,n),o=R("indices",e,t,n);return[Pa(i,Ae(o,"int32"),s)]}case"ReverseV2":case"Reverse":{const s=R("axis",e,t,n),i=R("x",e,t,n);return[Ts(i,s)]}case"Slice":{const s=R("begin",e,t,n),i=R("size",e,t,n);return[tt(R("x",e,t,n),s,i)]}case"StridedSlice":{const s=R("begin",e,t,n),i=R("end",e,t,n),o=R("strides",e,t,n),a=R("beginMask",e,t,n),c=R("endMask",e,t,n),h=R("ellipsisMask",e,t,n),d=R("newAxisMask",e,t,n),m=R("shrinkAxisMask",e,t,n),f=R("x",e,t,n);return[Pb(f,s,i,o,a,c,h,d,m)]}case"Pack":return Q(()=>{const s=R("axis",e,t,n),i=R("tensors",e,t,n),o=i[0].shape,a=Mr(i[0]).shape,c=i.map(h=>{const d=ae(h.shape,o);if(!d&&!ae(Mr(h).shape,a))throw new Error("the input tensors shape does not match");return d?h:K(h,o)});return[es(c,s)]});case"Unpack":{const s=R("axis",e,t,n),i=R("tensor",e,t,n);return Qs(i,s)}case"Tile":{const s=R("reps",e,t,n);return[$r(R("x",e,t,n),s)]}case"Split":case"SplitV":{const s=R("axis",e,t,n),i=R("numOrSizeSplits",e,t,n),o=R("x",e,t,n);return hs(o,i,s)}case"ScatterNd":{const s=R("indices",e,t,n),i=R("values",e,t,n),o=R("shape",e,t,n);return[EA(s,i,o)]}case"GatherNd":{const s=R("x",e,t,n),i=R("indices",e,t,n);return[DA(s,i)]}case"SparseToDense":{const s=R("sparseIndices",e,t,n),i=R("outputShape",e,t,n),o=R("sparseValues",e,t,n),a=R("defaultValue",e,t,n);return[Hb(s,o,i,o.dtype===a.dtype?a:Ae(a,o.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Vte="slice_join";const nH=(e,t,n)=>{switch(e.op){case"FFT":return[Lh(R("x",e,t,n))];case"IFFT":return[qa(R("x",e,t,n))];case"RFFT":return[Sh(R("x",e,t,n))];case"IRFFT":return[xp(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not 
implemented`)}},Gte="spectral";const sH=(e,t,n)=>{switch(e.op){case"Cast":return[Ae(R("x",e,t,n),R("dtype",e,t,n))];case"ExpandDims":{const s=R("axis",e,t,n);return[Zn(R("x",e,t,n),s)]}case"Squeeze":{const s=R("axis",e,t,n);return[Mr(R("x",e,t,n),s)]}case"Reshape":return[K(R("x",e,t,n),R("shape",e,t,n))];case"MirrorPad":return[Eb(R("x",e,t,n),R("padding",e,t,n),R("mode",e,t,n))];case"PadV2":case"Pad":return[vi(R("x",e,t,n),R("padding",e,t,n),R("constantValue",e,t,n))];case"SpaceToBatchND":{const s=R("blockShape",e,t,n),i=R("paddings",e,t,n);return[gh(R("x",e,t,n),s,i)]}case"BatchToSpaceND":{const s=R("blockShape",e,t,n),i=R("crops",e,t,n);return[ch(R("x",e,t,n),s,i)]}case"DepthToSpace":{const s=R("blockSize",e,t,n),i=R("dataFormat",e,t,n).toUpperCase();return[Sb(R("x",e,t,n),s,i)]}case"BroadcastTo":return[lh(R("x",e,t,n),R("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Yte="transformation";function GN(e,t,n,s){const i=((o,a,c)=>{switch(o.category){case"arithmetic":return Q(()=>FY(o,a,c));case"basic_math":return Q(()=>_Y(o,a,c));case"control":return zY(o,a,c);case"convolution":return Q(()=>VY(o,a,c));case"creation":return Q(()=>GY(o,a,c));case"dynamic":return YY(o,a,c);case"evaluation":return Q(()=>HY(o,a,c));case"image":return Q(()=>XY(o,a,c));case"graph":return Q(()=>qY(o,a,c));case"logical":return Q(()=>JY(o,a,c));case"matrices":return Q(()=>ZY(o,a,c));case"normalization":return Q(()=>QY(o,a,c));case"reduction":return Q(()=>eH(o,a,c));case"slice_join":return Q(()=>tH(o,a,c));case"spectral":return Q(()=>nH(o,a,c));case"transformation":return Q(()=>sH(o,a,c));case"hash_table":return KY(o,a,c,s);case"custom":const h=UN(o.op);if(h&&h.customExecutor)return h.customExecutor(new kY(o,a,c));throw TypeError(`Custom op ${o.op} is not registered.`);default:throw TypeError(`Unknown op '${o.op}'. 
File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return bo(i)?i.then(o=>[].concat(o)):[].concat(i)}class YN{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;tt.id===0&&t.iterationId===0?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function HN(e,t,n,s){const i=new Set,o=[];let a=null,c=null;const h=new Set,d=Object.keys(e).map(b=>ds(b)[0]);let m=[];s!=null&&(m=s.map(b=>ds(b.name)[0]));const f=[...t];for(;f.length>0;){const b=f.pop();if((qN(b)||cH(b)||lH(b))&&(a==null&&(a=b,c=a.children.map(w=>w.name).filter(w=>i.has(w)))),i.add(b.name),n[b.name]!=null)continue;if(d.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){o.push(b.name);continue}b.inputs.forEach(w=>{if(h.has(w.name))return;h.add(w.name),f.push(w)})}return{inputs:e,outputs:t,usedNodes:i,missingInputs:o,dynamicNode:a,syncInputs:c}}function iH(e,t,n){const{usedNodes:s,inputs:i}=n,o=[],a=Object.keys(i).map(m=>ds(m)[0]).map(m=>e.nodes[m]),c=e.initNodes;a.forEach(m=>{s.has(m.name)&&o.push(m)}),e.weights.forEach(m=>{s.has(m.name)&&o.push(m)}),c!=null&&c.forEach(m=>{s.has(m.name)&&o.push(m)});const h=new Set,d=[];for(;o.length>0;){const m=o.pop();h.add(m.name),t[m.name]||d.push(m),m.children.forEach(f=>{!h.has(f.name)&&s.has(f.name)&&f.inputs.every(b=>h.has(b.name))&&o.push(f)})}return d}const rH=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],oH=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"],aH=["HashTable","HashTableV2","LookupTableImport","LookupTableImportV2","LookupTableFind","LookupTableFindV2"];function qN(e){return rH.indexOf(e.op)>=0}function cH(e){return oH.indexOf(e.op)>=0}function lH(e){return aH.indexOf(e.op)>=0}class 
iS{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,e.functions!=null&&Object.keys(e.functions).forEach(n=>{this._functionExecutorMap[n]=new iS(e.functions[n],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map(n=>e[n].map(s=>s.id));this._weightIds=[].concat(...t),this._weightMap=e}set resourceManager(e){this._resourceManager=e}get inputs(){return this._inputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(e=>e.signatureKey||e.name)}get outputNodes(){return this._outputs.map(e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t})}get functions(){return Object.keys(this._functions).reduce((e,t)=>(e[t]=this._functions[t].signature,e),{})}getCompilationKey(e,t){const n=e.map(i=>i.name).sort(),s=t.map(i=>i.name).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=HN(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:i,syncInputs:o}=n;if(i!=null)throw new Error(`This execution contains the node '${i.name}', which has the dynamic op '${i.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${o}]`);if(s.length>0){const a=t.map(h=>h.name),c=Object.keys(e);throw new Error(`Cannot compute the outputs [${a}] from the provided inputs [${c}]. 
Missing the following inputs: [${s}]`)}return iH(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map(m=>this.graph.nodes[ds(m)[0]]),i=t.map(m=>ds(m)[0]);let o=i.map(m=>this.graph.nodes[m]);o.length===0&&(o=this._outputs);const a=this.getCompilationKey(s,o);let c=this.compiledMap.get(a);c==null&&(c=this.compile(e,o),this.compiledMap.set(a,c));const h={},d={};return Q(()=>{const m=new YN(this.weightMap,h,d,this.functionExecutorMap),f=Object.assign({},this.weightMap);Object.keys(e).forEach(L=>{const[x,v]=ds(L),N=[];N[v]=e[L],f[x]=N});const b=this.getFrozenTensorIds(f),w={};for(let L=0;Lss(L,f,m))})}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map(n=>e[n]).map(n=>n.map(s=>s.id)));return new Set(t)}checkTensorForDisposal(e,t,n,s,i,o,a){if(t.category==="control"||o.indexOf(e)!==-1)return;n[e].forEach(c=>{c!=null&&(a[c.id]=(a[c.id]||0)+t.children.length)}),t.inputs.forEach(c=>{if(c.category!=="control"){const h=JG(c.name,n,s);h!=null&&h.forEach(d=>{if(d&&!i.has(d.id)){const m=a[d.id];m===1?(d.dispose(),delete a[d.id]):m!=null&&a[d.id]--}})}})}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},i={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const o=new YN(this.weightMap,s,i,this.functionExecutorMap),a=await this.executeWithControlFlow(e,o,t,n),c=t.map(f=>ss(f,a,o)),h=c.map(f=>f.id),d=Object.keys(e).map(f=>e[f].id),m=new Set([...h,...d,...this.weightIds]);return Object.keys(a).forEach(f=>{const b=a[f];b.forEach(w=>{w&&!w.isDisposed&&!m.has(w.id)&&w.dispose()})}),this.parent==null&&o.dispose(m),c}async executeFunctionAsync(e,t,n){const s=e.reduce((i,o,a)=>(i[this.inputs[a].name]=o,i),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const i=Object.keys(e),o=i.map(O=>this.graph.nodes[ds(O)[0]]),a=n.map(O=>ds(O)[0]);let c=a.map(O=>this.graph.nodes[O]);c.length===0&&(c=this._outputs);const{usedNodes:h,missingInputs:d,dynamicNode:m,syncInputs:f}=HN(e,c,this.weightMap,this._initNodes),b=[...o,...this.graph.weights,...this._initNodes||[]].map(O=>({node:O,contexts:t.currentContext})),w=Object.assign({},this.weightMap);Object.keys(e).forEach(O=>{const[E,k]=ds(O),F=[];F[k]=e[O],w[E]=F});const L={},x=this.getFrozenTensorIds(w),v={};for(;b.length>0;){const O=this.processStack(o,b,t,w,v,x,a,L,h);await Promise.all(O)}m==null&&!s&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");const N=c.filter(O=>!qN(O)&&!ss(O.name,w,t)).map(O=>O.name);if(N.length>0){let O="";throw m!=null&&(O=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${f}]`),new Error(`Cannot compute the outputs [${N}] from the provided inputs [${i}]. Consider providing the following inputs: [${d}]. 
${O}`)}return w}processStack(e,t,n,s,i,o,a,c,h){const d=[];for(;t.length>0;){const m=t.pop();n.currentContext=m.contexts;let f="";if(m.node.op==="Enter"&&R("isConstant",m.node,s,n)&&([f]=or(m.node.name,n)),s[m.node.name]==null){const b=GN(m.node,s,n,this._resourceManager);f||([f]=or(m.node.name,n));const w=n.currentContext;bo(b)?d.push(b.then(L=>(s[f]=L,n.currentContext=w,this.checkTensorForDisposal(f,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h),L))):(s[f]=b,this.checkTensorForDisposal(f,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h))}else this.processChildNodes(m.node,t,n,s,i,h)}return d}processChildNodes(e,t,n,s,i,o){e.children.forEach(a=>{const[c]=or(a.name,n);if(i[c]||!o.has(a.name))return;a.op==="Merge"?a.inputNames.some(h=>!!ss(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a})):a.inputNames.every(h=>!!ss(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a}))})}dispose(){Object.keys(this.weightMap).forEach(e=>this.weightMap[e].forEach(t=>t.dispose()))}checkInputShapeAndType(e){Object.keys(e).forEach(t=>{const n=e[t],[s]=ds(t),i=this.graph.nodes[s];if(i.attrParams.shape&&i.attrParams.shape.value){const o=i.attrParams.shape.value,a=o.length===n.shape.length&&n.shape.every((c,h)=>o[h]===-1||o[h]===c);A(a,()=>`The shape of dict['${i.name}'] provided in model.execute(dict) must be [${o}], but was [${n.shape}]`)}i.attrParams.dtype&&i.attrParams.dtype.value&&A(n.dtype===i.attrParams.dtype.value,()=>`The dtype of dict['${i.name}'] provided in model.execute(dict) must be ${i.attrParams.dtype.value}, but was ${n.dtype}`)})}mapInputs(e){const t={};for(const n in e)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[n]!=null){const s=this._signature.inputs[n];t[s.name]=e[n]}else t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter(n=>{const[s]=ds(n);return this.graph.nodes[s]==null});if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map(t=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[t]!=null){const n=this._signature.outputs[t];return n.name}return t},{})}checkOutputs(e){e.forEach(t=>{const[n]=ds(t);if(!this.graph.nodes[n])throw new Error(`The output '${t}' is not found in the graph`)})}}class hH{constructor(e={},t={}){this.hashTableNameToHandle=e,this.hashTableMap=t}addHashTable(e,t){this.hashTableNameToHandle[e]=t.handle,this.hashTableMap[t.id]=t}getHashTableHandleByName(e){return this.hashTableNameToHandle[e]}getHashTableById(e){return this.hashTableMap[e]}dispose(){for(const e in this.hashTableMap)this.hashTableMap[e].clearAndClose(),delete this.hashTableMap[e];for(const e in this.hashTableNameToHandle)this.hashTableNameToHandle[e].dispose(),delete this.hashTableNameToHandle[e]}}const uH="?tfjs-format=file",dH="model.json";class jN{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",t==null&&(this.loadOptions={}),this.resourceManager=new hH}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){const e=this.modelUrl;if(e.load!=null)this.handler=e;else if(this.loadOptions.requestInit!=null)this.handler=Yd(e,this.loadOptions);else{const t=Vy(e,this.loadOptions);if(t.length===0)t.push(Yd(e,this.loadOptions));else if(t.length>1)throw new 
Error(`Found more than one (${t.length}) load handlers for URL '${[e]}'`);this.handler=t[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let n={};this.artifacts.userDefinedMetadata!=null&&(n=this.artifacts.userDefinedMetadata.signature),this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=Pd(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new iS(BN.Instance.transformGraph(t,n)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),this.executor.resourceManager=this.resourceManager,e.modelInitializer!=null){const i=BN.Instance.transformGraph(e.modelInitializer);this.initializer=new iS(i),this.initializer.weightMap=this.executor.weightMap,this.initializer.resourceManager=this.resourceManager,this.initializer.executeAsync({},[])}return!0}async save(e,t){if(typeof e=="string"){const n=zy(e);if(n.length===0)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(n.length>1)throw new Error(`Found more than one (${n.length}) save handlers for URL '${e}'`);e=n[0]}if(e.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof ee)&&!Array.isArray(e))return e;if(e=Array.isArray(e)?e:[e],e.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce((t,n,s)=>(t[n]=e[s],t),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce((t,n)=>(t[n]=[e[n]],t),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose(),this.resourceManager.dispose()}}async function pH(e,t={}){if(e==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(e.load==null&&(e.endsWith("/")||(e=e+"/"),e=`${e}${dH}${uH}`));const n=new jN(e,t);return await n.load(),n}const KN="2.7.0";function mH(e,t){return Im(e,t)}function Im(e,t,n=new Map,s=new Set){if(e==null)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(i.recurse)if(cc(e)){const o=Array.isArray(e)?[]:{};s.add(e);for(const a in e){const c=e[a],h=Im(c,t,n,s);o[a]=h}return s.delete(e),o}else throw new Error(`Can't recurse into non-iterable type: ${e}`);else return n.set(e,i.value),i.value}function fH(e,t=JN){return XN(e,t)}function XN(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(i.recurse)if(cc(s)){const o=Array.isArray(s)?[]:{};n.add(s);for(const a in s){const c=e.map(d=>d[a]),h=XN(c,t,n);o[a]=h}return n.delete(s),o}else throw new Error(`Can't recurse into non-iterable type: ${s}`);else return i.value}function JN(e){return e===null?null:cc(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}async function ZN(e,t){const n=new Map;Im(e,t,n);for(const i of Array.from(n.keys())){const o=n.get(i);if(bo(o)){const a=await o;n.set(i,a)}}const s=Im(e,t,n);return s}function cc(e){return e!=null&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||typeof e=="object"&&!(e instanceof ee))}function gH(e){return e==null||yH(e)||Array.isArray(e)||typeof e=="object"&&e instanceof ee||hn(e)}function yH(e){return e===null||typeof e!="object"&&typeof e!="function"}function bH(e){return mH(e,wH)}function wH(e){return e instanceof ee?{value:e.clone(),recurse:!1}:cc(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class QN{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,e==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class rS extends 
QN{constructor(){super(rS.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=this.capacity*2,t=new Array(e),n=this.length();for(let s=0;s({value:t++,done:!1}))}function su(e){return new IH(e)}function t0(e,t){return new s0(e,t)}function qte(e,t,n){return t0(su(e).take(t),n)}function LH(e,t=Jr.FAIL){return new EH(e,t)}class Sn{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new RH(this,e)}filter(e){return new NH(this,e)}map(e){return new CH(this,e)}mapAsync(e){return new n0(this,e)}serialMapAsync(e){return new n0(this,e).serial()}flatmap(e){return new OH(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile(t=>t===!0)}rowMajorBatch(e,t=!0){return new vH(this,e,t)}columnMajorBatch(e,t=!0,n=JN){const s=this.rowMajorBatch(e,t);return s.map(i=>fH(i,n))}concatenate(e,t){return new s0(e0([this,e]),t)}take(e){return e<0||e==null?this:new AH(this,e)}skip(e){return e<0||e==null?this:new TH(this,e)}prefetch(e){return new i0(this,e)}shuffle(e,t){return new DH(this,e,t)}serial(){return new xH(this)}}class SH extends Sn{constructor(e){super();this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:bH(e),done:!1}}}class IH extends Sn{constructor(e){super();this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class xH extends Sn{constructor(e){super();this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class TH extends Sn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class vH extends Sn{constructor(e,t,n=!0){super();this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){const e=[];for(;e.length0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class NH extends Sn{constructor(e,t){super();this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){const 
e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;He(e.value)}}}class CH extends Sn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);for(const i of t)Bd(i,s)||i.dispose();return{value:n,done:!1}}}class RH extends Sn{constructor(e,t){super();this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class n0 extends Sn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=await this.transform(e.value),s=Hi(n);for(const i of t)Bd(i,s)||i.dispose();return{value:n,done:!1}}}class oS extends Sn{constructor(){super();this.outputQueue=new rS,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class OH extends oS{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);this.outputQueue.pushAll(n);for(const i of t)Bd(i,s)||i.dispose();return!0}}class s0 extends Sn{constructor(e,t){super();this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){const e="TODO: fill in upstream of chained summaries";return`${e} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,this.iterator==null){const n=await this.moreIterators.next();if(n.done)return{value:null,done:!0};this.iterator=n.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Jr;(function(e){e[e.FAIL=0]="FAIL",e[e.SHORTEST=1]="SHORTEST",e[e.LONGEST=2]="LONGEST"})(Jr||(Jr={}));class EH extends Sn{constructor(e,t=Jr.FAIL){super();this.iterators=e,this.mismatchMode=t,this.count=0,this.currentPromise=null}summary(){const e="TODO: fill in upstream of zip summaries";return`{${e}} -> Zip`}async nextState(e){await e;let t=0,n=0;function s(o){if(o instanceof Sn){const a=o.next();return{value:a.then(c=>(t++,c.done&&n++,c.value)),recurse:!1}}else return{value:null,recurse:!0}}const i=await ZN(this.iterators,s);if(t===n)return{value:null,done:!0};if(n>0)switch(this.mismatchMode){case Jr.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case Jr.SHORTEST:return{value:null,done:!0};case Jr.LONGEST:default:}return this.count++,{value:i,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class i0 extends Sn{constructor(e,t){super();this.upstream=e,this.bufferSize=t,this.buffer=new QN(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class DH extends i0{constructor(e,t,n){super(e,t);this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=Ha(n||jn().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(t.done)this.upstreamExhausted=!0;else return this.refill(),t}return{value:null,done:!0}}}class lc{constructor(){this.size=null}batch(e,t=!0){const n=this;A(e>0,()=>`batchSize needs to be positive, but it is + ${e}`);let s;return this.size===Infinity||this.size==null?s=this.size:t?s=Math.ceil(this.size/e):s=Math.floor(this.size/e),ps(async()=>(await n.iterator()).columnMajorBatch(e,t,_H),s)}concatenate(e){const t=this;let n;return this.size===Infinity||e.size===Infinity?n=Infinity:this.size!=null&&e.size!=null?n=this.size+e.size:n=null,ps(async()=>(await t.iterator()).concatenate(await e.iterator()),n)}filter(e){const t=this;let n;return this.size===Infinity?n=Infinity:n=null,ps(async()=>(await t.iterator()).filter(s=>Q(()=>e(s))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return ps(async()=>(await t.iterator()).map(n=>Q(()=>e(n))),this.size)}mapAsync(e){const t=this;return ps(async()=>(await t.iterator()).mapAsync(e),this.size)}prefetch(e){if(e==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return ps(async()=>(await t.iterator()).prefetch(e),this.size)}repeat(e){const t=this;let n;return this.size!=null&&e>0?n=this.size*e:e===0?n=0:this.size!=null&&(e===void 0||e<0)?n=Infinity:n=null,ps(async()=>{const s=su(async()=>({value:await t.iterator(),done:!1}));return t0(s.take(e))},n)}skip(e){const t=this;let n;return this.size!=null&&e>=0&&this.size>=e?n=this.size-e:this.size!=null&&(this.size(await t.iterator()).skip(e),n)}shuffle(e,t,n=!0){if(e==null||e<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,i=Ha(t||jn().toString());return ps(async()=>{let o=i.int32();return n&&(o+=i.int32()),(await s.iterator()).shuffle(e,o.toString())},this.size)}take(e){const t=this;let n;return this.size!=null&&this.size>e?n=e:this.size!=null&&this.size<=e?n=this.size:n=null,ps(async()=>(await t.iterator()).take(e),n)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}lc.MAX_BUFFER_SIZE=1e4;function ps(e,t=null){return new class extends lc{constructor(){super(...arguments);this.size=t}async iterator(){return e()}}}function kH(e){return ps(async()=>e0(e),e.length)}function FH(e){if(!cc(e))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(e))for(let n=0;n{const n=await ZN(e,s=>{if(s instanceof lc)return{value:s.iterator(),recurse:!1};if(cc(s))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return LH(n,Jr.SHORTEST)},t)}function _H(e){if(e===null)return null;const t=e[0];if(gH(t)){const n=WH(e);return{value:n,recurse:!1}}return{value:null,recurse:!0}}function WH(e){if(e.length===0)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof ee?es(e):sn(e)}class r0 extends lc{constructor(e){super();this.input=e}async iterator(){const e=await this.input.iterator(),t=e.decodeUTF8(),n=t.split(` +`).map(s=>(s.endsWith("\r")&&(s=s.slice(0,-1)),s));return n}}const xm='"',iu=Symbol("out"),o0=Symbol("field"),Tm=Symbol("quote"),aS=Symbol("quoteafterquote"),a0=Symbol("quoteinquote");class c0 extends lc{constructor(e,t){super();this.input=e,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new r0(e),t||(t={}),this.hasHeader=!(t.hasHeader===!1),this.fullColumnNames=t.columnNames,this.columnConfigs=t.columnConfigs,this.configuredColumnsOnly=t.configuredColumnsOnly,t.delimWhitespace?(A(t.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=t.delimiter?t.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){const e=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!e)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&e&&A(e.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+e.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=e);const t=this.fullColumnNames.reduce((s,i)=>(s[i]=s[i]+1||1,s),{}),n=Object.keys(t).filter(s=>t[s]>1);if(A(n.length===0,()=>"Duplicate column names found: "+n.toString()),this.columnConfigs)for(const s of Object.keys(this.columnConfigs)){const i=this.fullColumnNames.indexOf(s);if(i===-1)throw new Error('The key "'+s+'" provided in columnConfigs does not match any of the 
column names ('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){const e=await this.base.iterator(),t=await e.next();if(t.done)throw new Error("No data was found for CSV parsing.");const n=t.value,s=this.parseRow(n,!1);return s}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let e=await this.base.iterator();return this.hasHeader&&(e=e.skip(1)),e.map(t=>this.makeDataElement(t))}makeDataElement(e){const t=this.parseRow(e),n={},s={};for(let i=0;i14||!Number.isInteger(t))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=e.numFramesPerSpectrogram||43,this.sampleRateHz=e.sampleRateHz,this.columnTruncateLength=e.columnTruncateLength||this.fftSize,this.audioTrackConstraints=e.audioTrackConstraints,this.smoothingTimeConstant=e.smoothingTimeConstant||0,this.includeSpectrogram=!(e.includeSpectrogram===!1),this.includeWaveform=e.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(e={}){if(oe().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");const t=new l0(e);return await t.start(),t}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(n){throw new Error(`Error thrown while initializing video stream: ${n.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");const e=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new e,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);const t=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,t.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let e,t;const n=await this.getAudioData();if(this.includeSpectrogram){const s=this.flattenQueue(n.freqDataQueue);e=this.getTensorFromAudioDataArray(s,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){const s=this.flattenQueue(n.timeDataQueue);t=this.getTensorFromAudioDataArray(s,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:e,waveform:t},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){const e=[],t=[];let n=0;return new Promise(s=>{const 
i=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&s({freqDataQueue:e,timeDataQueue:t}),e.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),t.push(this.timeData.slice())),++n===this.numFrames&&(clearInterval(i),s({freqDataQueue:e,timeDataQueue:t}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(e){const t=e[0].length,n=new Float32Array(e.length*t);return e.forEach((s,i)=>n.set(s,i*t)),n}getTensorFromAudioDataArray(e,t){const n=new Float32Array(P(t));return n.set(e,n.length-e.length),sn(n,t)}}class h0 extends Sn{constructor(e,t){super();if(this.webcamVideoElement=e,this.webcamConfig=t,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=ls([0],"int32"),this.webcamConfig.centerCrop){const n=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,s=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,i=(1-n)/2,o=(1-s)/2,a=i+n,c=s+o;this.cropBox=Pr([o,i,c,a],[1,4])}else this.cropBox=Pr([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(e,t={}){if(oe().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!e){if(e=document.createElement("video"),!t.resizeWidth||!t.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");e.width=t.resizeWidth,e.height=t.resizeHeight}const n=new h0(e,t);return await n.start(),n}async start(){this.webcamConfig.facingMode&&A(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. 
Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(e){throw e.message=`Error thrown while initializing video stream: ${e.message}`,e}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(e){console.log(e),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(e=>{this.webcamVideoElement.onloadedmetadata=()=>{e()}})}async next(){if(this.isClosed)return{value:null,done:!0};let e;try{e=OT(this.webcamVideoElement)}catch(t){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(t)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(e),done:!1}}catch(t){throw new Error(`Error thrown cropping the video: ${t.message}`)}finally{e.dispose()}else return{value:e,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(e){return Q(()=>{const t=e.toFloat().expandDims(0);let n;n=zr.cropAndResize(t,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");const s=n.shape;return n.reshape(s.slice(1))})}async capture(){return(await this.next()).value}stop(){const e=this.stream.getTracks();e.forEach(t=>t.stop());try{this.webcamVideoElement.srcObject=null}catch(t){console.log(t),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class u0{}class d0 extends Sn{split(e){return new $H(this,e)}}class $H extends d0{constructor(e,t){super();this.upstream=e,this.impl=new UH(e,t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class UH extends oS{constructor(e,t){super();this.upstream=e,this.separator=t,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){const e=await this.upstream.next();if(e.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);const t=e.value.split(this.separator);t[0]=this.carryover+t[0];for(const n of t.slice(0,-1))this.outputQueue.push(n);return this.carryover=t[t.length-1],!0}}class BH extends Sn{decodeUTF8(){return new MH(this)}}class MH extends d0{constructor(e){super();this.upstream=e,this.impl=new PH(e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class PH extends oS{constructor(e){super();if(this.upstream=e,oe().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{const{StringDecoder:t}=require("string_decoder");this.decoder=new t("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){const e=await this.upstream.next();let t;if(e.done)return!1;t=e.value;let n;return oe().get("IS_BROWSER")?n=this.decoder.decode(t,{stream:!0}):n=this.decoder.write(Buffer.from(t.buffer)),this.outputQueue.push(n),!0}}class p0 extends BH{constructor(e,t={}){super();this.file=e,this.options=t,A(e instanceof Uint8Array||(oe().get("IS_BROWSER")?e instanceof File||e instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=t.offset||0,this.chunkSize=t.chunkSize||1024*1024}summary(){return`FileChunks 
${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};const e=new Promise((t,n)=>{const s=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)t(new Uint8Array(this.file.slice(this.offset,s)));else{const i=new FileReader;i.onload=a=>{let c=i.result;if(c instanceof ArrayBuffer&&(c=new Uint8Array(c)),!(c instanceof Uint8Array))return n(new TypeError("FileReader returned unknown type."));t(c)},i.onabort=a=>n(new Error("Aborted")),i.onerror=a=>n(new Error(a.type));const o=this.file.slice(this.offset,s);i.readAsArrayBuffer(o)}this.offset=s});return{value:await e,done:!1}}}async function zH(e,t={}){let n,s;typeof e=="string"?n=e:(n=e.url,s=VH(e));const i=await nT(n,s);if(i.ok){const o=new Uint8Array(await i.arrayBuffer());return new p0(o,t)}else throw new Error(i.statusText)}const VH=e=>{const t={method:e.method,headers:e.headers,body:e.body,mode:e.mode,credentials:e.credentials,cache:e.cache,redirect:e.redirect,referrer:e.referrer,integrity:e.integrity};return t};function m0(e){return typeof e=="string"&&e.substr(0,7)==="file://"}class f0 extends u0{constructor(e,t={}){super();this.input=e,this.options=t}async iterator(){if(m0(this.input)&&oe().get("IS_NODE")){const e=require("fs");this.input=e.readFileSync(this.input.substr(7))}return new p0(this.input,this.options)}}class g0 extends u0{constructor(e,t={}){super();this.url=e,this.fileOptions=t}async iterator(){return m0(this.url)?new f0(this.url,this.fileOptions).iterator():zH(this.url,this.fileOptions)}}function GH(e,t={}){return new c0(new g0(e),t)}function YH(e){const t=su(e);return ps(async()=>t)}function HH(e){return ps(async()=>{const t=await e();return su(()=>t.next())})}async function qH(e,t){return h0.create(e,t)}async function jH(e){return l0.create(e)}const y0="2.7.0";var KH=Object.freeze({__proto__:null,array:kH,Dataset:lc,zip:FH,CSVDataset:c0,TextLineDataset:r0,csv:GH,func:YH,generator:HH,microphone:jH,webcam:qH,FileDataSource:f0,URLDataSource:g0,version_data:y0});function Te(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&A(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}const XH=Dp,JH=lw,ZH=hw,QH=uw,eq=Ap;class tq extends y{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new p(this,Ki())}write(e,t,n){this.firstUse&&(this.firstUse=!1,oe().get("IS_NODE")&&Za(` ============================ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, which binds to TensorFlow C++, by running npm i @tensorflow/tfjs-node, or npm i @tensorflow/tfjs-node-gpu if you have CUDA. Then call require('@tensorflow/tfjs-node'); (-gpu suffix for CUDA) at the start of your program. Visit https://github.com/tensorflow/tfjs-node for more details. 
-============================`));const s={};return this.data.set(s,{values:e,dtype:n,refCount:1}),s}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return{dataId:s,shape:e,dtype:t}}incRef(e){const t=this.data.get(e);t.refCount++}decRef(e){if(this.data.has(e)){const t=this.data.get(e);t.refCount--}}move(e,t,n,s){this.data.set(e,{values:t,dtype:s,refCount:1})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:t,complexTensorInfos:n}=this.data.get(e);if(t==="complex64"){const s=this.readSync(n.real.dataId),i=this.readSync(n.imag.dataId);return Zi(s,i)}return this.data.get(e).values}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if(e.dtype==="string")try{n=t.map(s=>Dl(s))}catch(s){throw new Error("Failed to decode encoded string bytes into utf-8")}return Ze(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return Fs().makeTensorFromDataId(s,t,n,this)}disposeData(e){if(this.data.has(e)){const{complexTensorInfos:t}=this.data.get(e);t!=null&&(this.disposeData(t.real.dataId),this.disposeData(t.imag.dataId)),this.data.delete(e)}}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.data.has(t)){const n=this.data.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}async time(e){const t=Vn();e();const n=Vn()-t;return{kernelMs:n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(e,t,n,s){xe(e,"stridedSlice");const i=bd(t,n,s);if(i.some(c=>c===0))return en([],i);const o=Ze(i,e.dtype),a=this.bufferSync(e);for(let c=0;ca[c]=e.shape[c]-1-a[c]),n.set(s.get(...a),...o)}return n.toTensor()}neg(e){return xe(e,"neg"),X(Ne(-1),e)}addN(e){xe(e,"addN");const t=e.map(i=>this.readSync(i.dataId)),n=Ze(e[0].shape,e[0].dtype),s=n.values;for(let i=0;iMath.pow(n,s))}batchMatMul(e,t,n,s){xe([e,t],"matMul");const i=n?e.shape[1]:e.shape[2],o=n?e.shape[2]:e.shape[1],a=s?t.shape[1]:t.shape[2],c=e.shape[0],h=this.readSync(e.dataId),d=this.readSync(t.dataId),[m,y,b]=n?[e.strides[0],1,e.strides[1]]:[e.strides[0],e.strides[1],1],[w,L,T]=s?[1,t.strides[1],t.strides[0]]:[t.strides[1],1,t.strides[0]],A=o*a,N=Ze([c,o,a],e.dtype),E=N.values,D=this.blockSize;for(let F=0;FMath.floor(i/o),s="int32";return this.broadcastedBinaryOp(e,t,s,n)}sum(e,t){xe(e,"sum"),es("sum",t,e.rank);const[n,s]=Cn(e.shape,t),i=vn(e.dtype,"int32"),o=ct(n,i),a=we(s),c=this.readSync(o.dataId),h=this.readSync(e.dataId);for(let d=0;dy&&(y=L,b=w)}c[d]=b}return o}cumsum(e,t,n,s){if(xe(e,"cumsum"),t!==e.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=vn(e.dtype,"int32"),o=ct(e.shape,i),a=this.readSync(o.dataId),c=this.readSync(e.dataId),h=e.shape[e.rank-1],d=s?(m,y)=>m+h-y-1:(m,y)=>m+y;for(let m=0;mn===s?1:0)}notEqual(e,t){return xe([e,t],"notEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n!==s?1:0)}less(e,t){return xe([e,t],"less"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>nn<=s?1:0)}greater(e,t){return xe([e,t],"greater"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>s?1:0)}greaterEqual(e,t){return xe([e,t],"greaterEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>=s?1:0)}logicalAnd(e,t){return xe([e,t],"logicalAnd"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n&&s)}logicalOr(e,t){return xe([e,t],"logicalOr"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n||s)}select(e,t,n){xe([e,t,n],"select");const 
s=this.readSync(e.dataId),i=this.readSync(t.dataId),o=this.readSync(n.dataId),a=ct(t.shape,vn(t.dtype,n.dtype)),c=this.readSync(a.dataId);let h=0;const d=e.rank===0||e.rank>1||t.rank===1?1:we(t.shape.slice(1));for(let m=0;mMath.min(n,s))}mod(e,t){return xe([e,t],"mod"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>{const i=n%s;return n<0&&s<0||n>=0&&s>=0?i:(i+s)%s})}maximum(e,t){return xe([e,t],"maximum"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.max(n,s))}all(e,t){xe(e,"all"),es("all",t,e.rank);const[n,s]=Cn(e.shape,t),i=ct(n,e.dtype),o=we(s),a=this.readSync(i.dataId),c=this.readSync(e.dataId);for(let h=0;h{const i=n-s;return i*i})}linear(e){return e}relu(e){xe(e,"relu");const t=ct(e.shape,e.dtype),n=this.readSync(t.dataId),s=this.readSync(e.dataId);for(let i=0;in<0?s*n:n)}eluDer(e,t){xe([e,t],"eluDer");const n=new Float32Array(t.size),s=this.readSync(t.dataId),i=this.readSync(e.dataId);for(let o=0;o=1?n[o]=i[o]:n[o]=i[o]*(a+1)}return this.makeOutput(n,t.shape,"float32")}atan2(e,t){return xe([e,t],"atan2"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.atan2(n,s))}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){let a=this.conv2d(e,t,n);return s&&(a=be(a,s)),i&&(a=BL(this,a,i,o)),a}conv2d(e,t,n){xe([e,t],"conv2d");const s=n.filterHeight,i=n.filterWidth,o=n.dilationHeight,a=n.dilationWidth,c=n.padInfo.left,h=n.padInfo.top,d=n.dataFormat==="channelsLast",m=Ze(n.outShape,e.dtype),y=e.strides[0],b=d?e.strides[1]:e.strides[2],w=d?e.strides[2]:1,L=d?1:e.strides[1],T=m.strides[0],A=d?m.strides[1]:m.strides[2],N=d?m.strides[2]:1,E=d?1:m.strides[1],D=this.readSync(e.dataId),F=this.readSync(t.dataId),_=m.values;for(let B=0;B=n.inHeight)continue;const he=ce*t.strides[0],de=$+ue*b;for(let le=0;le=n.inWidth)continue;const Ee=he+Ie*t.strides[1],We=de+Se*w;let Oe=Ee;for(let $e=0;$e=n.inDepth)continue;const H=B*t.strides[0],q=N+$*e.strides[1];for(let J=0;J=n.inHeight)continue;const de=H+ue*t.strides[1],le=q+he*e.strides[2];for(let ye=0;ye=n.inWidth)continue;const We=de+Se*t.strides[2],Oe=le+Ee*n.inChannels;let $e=We;for(let Ye=0;Ye=n.inHeight)continue;const B=F*t.strides[0],$=T+_*e.strides[1];for(let H=0;H=n.inWidth)continue;const ue=B+re*t.strides[1],he=$+ce*n.inChannels;let de=q,le=ue;for(let ye=0;yed*m),i=fh(e.shape,t,s),o=gh(i.length,t.length),a=yh(e.shape,t,s),c=Rb(n,t.length),h=Ob(a,n,t.length);return Me(e.reshape(i),o).reshape(a).slice(c,h)}pool3d(e,t,n){xe(e,"pool3d");const s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,T=n==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,A=this.readSync(e.dataId),N=Ze(t.outShape,e.dtype),E=N.values,D=t.outShape[1]*t.outShape[2]*t.outShape[3]*t.outShape[4],F=t.outShape[2]*t.outShape[3]*t.outShape[4],_=t.outShape[3]*t.outShape[4],B=t.outShape[4];for(let $=0;$et?et=zs:n==="avg"&&(bt+=zs,Jt++),isNaN(et))break}if(isNaN(et))break}if(isNaN(et))break}const un=Ye+J;E[un]=n==="avg"?bt/Jt:et}}}}return N.toTensor()}avgPool3d(e,t){return xe(e,"avgPool3d"),this.pool3d(e,t,"avg").toFloat()}avgPool3dBackprop(e,t,n){xe([e,t],"avgPool3dBackprop");const 
s=n.strideDepth,i=n.strideHeight,o=n.strideWidth,a=n.filterDepth,c=n.filterHeight,h=n.filterWidth,d=n.dilationDepth,m=n.dilationHeight,y=n.dilationWidth,b=n.effectiveFilterDepth,w=n.effectiveFilterHeight,L=n.effectiveFilterWidth,T=b-1-n.padInfo.front,A=L-1-n.padInfo.left,N=w-1-n.padInfo.top,E=Ze(t.shape,"float32"),D=1/(a*c*h),F=this.bufferSync(e);for(let _=0;_=n.outDepth||Math.floor(de)!==de)continue;for(let le=0;le=n.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=n.outWidth||Math.floor(Ie)!==Ie)continue;const Se=F.get(_,de,ye,Ie,B);ue+=Se}}}E.set(ue*D,_,$,H,q,B)}return E.toTensor()}maxPool3d(e,t){return xe(e,"maxPool3d"),this.pool3d(e,t,"max").toFloat()}maxPool3dPositions(e,t){const n=Ze(t.outShape,"int32"),s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,y=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,T=this.bufferSync(e);for(let A=0;A=he&&(he=We,de=ye*m*y+Ie*m+Ee)}}}n.set(de,A,E,B,J,N)}}}return n.toTensor()}maxPool3dBackprop(e,t,n,s){xe([t,n],"maxPool3dBackprop");const i=this.maxPool3dPositions(t,s),o=s.strideDepth,a=s.strideHeight,c=s.strideWidth,h=s.dilationDepth,d=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterDepth,b=s.effectiveFilterHeight,w=s.effectiveFilterWidth,L=y-1-s.padInfo.front,T=w-1-s.padInfo.left,A=b-1-s.padInfo.top,N=Ze(t.shape,"float32"),E=this.bufferSync(i),D=this.bufferSync(e);for(let F=0;F=s.outDepth||Math.floor(he)!==he)continue;for(let de=0;de=s.outHeight||Math.floor(le)!==le)continue;for(let ye=0;ye=s.outWidth||Math.floor(pe)!==pe)continue;const Ie=y*b*w-1-E.get(F,he,le,pe,_),Se=ue*b*w+de*w+ye,Ee=Ie===Se?1:0;if(Ee===0)continue;const We=D.get(F,he,le,pe,_);ce+=We*Ee}}}N.set(ce,F,B,$,H,_)}return N.toTensor()}resizeBilinear(e,t,n,s){xe(e,"resizeBilinear");const[i,o,a,c]=e.shape,h=this.readSync(e.dataId),d=new Float32Array(we([i,t,n,c])),m=[s&&t>1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n];let b=0;const w=m[0]/y[0],L=m[1]/y[1];for(let T=0;T1?i-1:i,n&&h>1?o-1:o],y=[n&&c>1?c-1:c,n&&h>1?h-1:h],b=m[0]/y[0],w=m[1]/y[1],L=this.readSync(e.dataId);let T=0;for(let A=0;A1?o-1:o,s&&n>1?a-1:a],y=[s&&t>1?t-1:t,s&&n>1?n-1:n],b=m[0]/y[0],w=m[1]/y[1];let L=0;for(let T=0;T1?i-1:i,n&&h>1?o-1:o],b=[n&&c>1?c-1:c,n&&h>1?h-1:h],w=y[0]/b[0],L=y[1]/b[1],T=1/w,A=1/L,N=Math.ceil(T)*2+2,E=Math.ceil(A)*2+2;for(let D=0;D=c)continue;const ye=F+le*e.strides[1],pe=le*w,Ie=Math.min(i-1,n?Math.round(pe):Math.floor(pe));if(_!==Ie)continue;for(let Se=0;Se=h)continue;const We=ye+Ee*e.strides[2],Oe=Ee*L,$e=Math.min(o-1,n?Math.round(Oe):Math.floor(Oe));q===$e&&(he+=m[We+ue])}}d[J+ue]=he}}}}return Ua(d,t.shape,t.dtype)}localResponseNormalization4D(e,t,n,s,i){xe(e,"localResponseNormalization4D");const o=e.shape[3],a=o-1,c=this.readSync(e.dataId),h=e.size,d=new Float32Array(h);function m(y){const b=y%o;let w=y-b+Math.max(0,b-t);const L=y-b+Math.min(b+t,a);let T=0;for(;w<=L;w++){const A=c[w];T+=A*A}return T}for(let y=0;y=0&&o[a]`Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${n}`),k(t>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=e.shape[1],o=e.shape[2],a=e.shape[3],c=i*t,h=o*t,d=a/(t*t),m=this.readSync(e.dataId),y=new Float32Array(s*c*h*d);let b=0;for(let w=0;wT[D]=0);const A=y.locToIndex(T),N=L.slice(-t.rank);d.forEach(D=>N[D]=0);const E=b.locToIndex(N);m[w]=s(a[A],c[E])}}return o.toTensor()}split(e,t,n){return cH(e,t,n)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(e,t,n,s,i,o){const[a,c,h,d]=e.shape,m=t.shape[0],[y,b]=s,w=Ze([m,y,b,d],"float32"),L=this.readSync(t.dataId),T=this.readSync(n.dataId),A=this.readSync(e.dataId),N=e.strides,E=w.strides;for(let D=0;D=a)continue;const J=y>1?($-_)*(c-1)/(y-1):0,re=b>1?(H-B)*(h-1)/(b-1):0;for(let ce=0;ce1?_*(c-1)+ce*J:.5*(_+$)*(c-1);if(ue<0||ue>c-1){for(let he=0;he1?B*(h-1)+ye*re:.5*(B+H)*(h-1);if(pe<0||pe>h-1){for(let We=0;We1?B*(h-1)+he*re:.5*(B+H)*(h-1);if(de<0||de>h-1){for(let pe=0;pe=e.size/a)throw new Error(`Invalid indices: ${b} does not index into ${e.shape}`);for(let L=0;L=s/i)throw new Error(`Invalid indices: ${T} does not index into ${n}`);for(let N=0;N{const{x:t}=e.inputs,n=e.backend;let s=new Float32Array(we(t.shape));if(t.dtype!=="complex64"){const i=n.data.get(t.dataId).values;s=e0(i)}else{const i=n.data.get(t.dataId),o=i.complexTensorInfos.real,a=i.complexTensorInfos.imag,c=n.data.get(o.dataId).values,h=n.data.get(a.dataId).values;for(let d=0;d{const a=tt(t,n),c=a.length,h=Ot(a),d=we(a),m=bn(o,d),y=t.length,b=n.length,w=Ot(t),L=Ot(n),T=Lo(t,a),A=Lo(n,a);if(T.length+A.length===0)for(let N=0;ND[$]=0);const F=Js(D,y,w),_=E.slice(-b);A.forEach($=>_[$]=0);const B=Js(_,b,L);m[N]=e(s[F],i[B])}return[m,a]}}function li(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,c=n.makeTensorInfo(s.shape,"complex64"),h=n.data.get(c.dataId);return h.complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",o),imag:n.makeTensorInfo(i.shape,"float32",a)},c}const fH={kernelName:hg,backendName:"cpu",kernelFunc:li};function Qa(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const gH={kernelName:al,backendName:"cpu",kernelFunc:Qa};function Vh(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.data.get(s.dataId).complexTensorInfos.real,o=n.data.get(i.dataId).values;return n.makeTensorInfo(i.shape,i.dtype,o)}const yH={kernelName:_g,backendName:"cpu",kernelFunc:Vh};function Yh(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return Qa({inputs:{x:i},backend:n});const a=ct(i.shape),c=Yh({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),h=li({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),h}if(i.dtype==="complex64"){const a=Vh({inputs:{input:i},backend:n}),c=Yh({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!iy(i.dtype,o)){const a=Qa({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32"){const a=n.data.get(i.dataId).values,c=Int32Array.from(a);return n.makeTensorInfo(i.shape,"int32",c)}if(o==="bool"){const a=n.data.get(i.dataId).values,c=Tr([0],i.dtype),[h,d]=$o((m,y)=>m!==y?1:0)(i.shape,[],a,c,"bool");return n.makeTensorInfo(d,"bool",h)}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const bH={kernelName:Jc,backendName:"cpu",kernelFunc:Yh};function ec(e,t,n,s){return n==null?({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;xe([a,c],e);const 
d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,y);return h.makeTensorInfo(w,y,b)}:({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;if(a.dtype==="complex64"||c.dtype==="complex64"){const d=Yh({inputs:{x:a},backend:h,attrs:{dtype:"complex64"}}),m=h.data.get(d.dataId),y=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,w=h.data.get(y.dataId).values,L=h.data.get(b.dataId).values,T=Yh({inputs:{x:c},backend:h,attrs:{dtype:"complex64"}}),A=h.data.get(T.dataId),N=A.complexTensorInfos.real,E=A.complexTensorInfos.imag,D=h.data.get(N.dataId).values,F=h.data.get(E.dataId).values,[_,B,$]=n(a.shape,c.shape,w,L,D,F),H=h.makeTensorInfo($,"float32",_),q=h.makeTensorInfo($,"float32",B),J=li({inputs:{real:H,imag:q},backend:h});return h.disposeIntermediateTensorInfo(d),h.disposeIntermediateTensorInfo(T),h.disposeIntermediateTensorInfo(H),h.disposeIntermediateTensorInfo(q),J}else{const d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,y=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,y);return h.makeTensorInfo(w,y,b)}}}function ML(e){return(t,n,s,i,o,a)=>{const c=tt(t,n),h=we(c),d=c.length,m=Ot(c),y=bn("float32",h),b=bn("float32",h),w=Lo(t,c),L=Lo(n,c),T=Zi(s,i),A=Zi(o,a),N=t.length,E=Ot(t),D=n.length,F=Ot(n);if(w.length+L.length===0)for(let _=0;_$[ce]=0);const H=Js($,N,E),q=B.slice(-D);L.forEach(ce=>q[ce]=0);const J=Js(q,D,F),re=e(T[H*2],T[H*2+1],A[J*2],A[J*2+1]);y[_]=re.real,b[_]=re.imag}return[y,b,c]}}const t0=$o((e,t)=>e+t),wH=ML((e,t,n,s)=>({real:e+n,imag:t+s})),n0=ec(Te,t0,wH),LH={kernelName:Te,backendName:"cpu",kernelFunc:n0};function tc(e){return(t,n,s)=>{const i=bn(n,t.length);for(let o=0;o{const{x:a}=s;if(xe(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=we(a.shape),m=n||a.dtype,y=lo(m,d);for(let b=0;b{const{x:a}=s;if(xe(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=n||a.dtype,m=t(h,d,i);return c.makeTensorInfo(a.shape,d,m)}}const s0=tc(e=>Math.ceil(e)),SH=nc(Zc,s0),IH={kernelName:Zc,backendName:"cpu",kernelFunc:SH};const i0=tc(e=>Math.exp(e)),xH=nc(sl,i0),TH={kernelName:sl,backendName:"cpu",kernelFunc:xH};const r0=tc(e=>Math.expm1(e)),AH=nc(il,r0),vH={kernelName:il,backendName:"cpu",kernelFunc:AH};const o0=tc(e=>Math.floor(e)),NH=nc(rl,o0),CH={kernelName:rl,backendName:"cpu",kernelFunc:NH};const a0=tc(e=>Math.log(e)),RH=nc(ul,a0),OH={kernelName:ul,backendName:"cpu",kernelFunc:RH};function c0(e,t,n,s){const i=bn(s,we(n));for(let o=0;oc&&(c=d)}i[o]=c}return i}const l0=$o((e,t)=>e*t),EH=ML((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n})),h0=ec(fl,l0,EH),DH={kernelName:fl,backendName:"cpu",kernelFunc:h0};const u0=tc(e=>1/Math.sqrt(e)),kH=nc(wl,u0),FH={kernelName:wl,backendName:"cpu",kernelFunc:kH};function d0(e,t,n,s,i){const o=Ey(s,t,n),a=we(n),c=Ot(s);if(o){const d=Dy(t,c);return e.subarray(d,d+a)}const h=bn(i,a);for(let d=0;dT+t[A]),L=Js(w,s.length,c);h[d]=e[L]}return h}function PL(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{begin:o,size:a}=s;xe(i,"slice");const[c,h]=wd(i,o,a);Oy(i,c,h);const d=n.data.get(i.dataId).values,m=d0(d,c,h,i.shape,i.dtype);return n.makeTensorInfo(h,i.dtype,m)}const _H={kernelName:Ku,backendName:"cpu",kernelFunc:PL};const p0=$o((e,t)=>e-t),WH=ML((e,t,n,s)=>({real:e-n,imag:t-s})),m0=ec(vl,p0,WH),$H={kernelName:vl,backendName:"cpu",kernelFunc:m0};function zL(e,t,n,s,i){const 
o=t.length,a=we(t),c=Ot(t),h=Ot(i),d=bn(n,we(i));for(let m=0;m{for(let A=0;Anew dH,1);const MH=xt(fe,e=>Math.acos(e)),PH={kernelName:fe,backendName:"cpu",kernelFunc:MH};const zH=xt(Ae,e=>Math.acosh(e)),GH={kernelName:Ae,backendName:"cpu",kernelFunc:zH};const VH=xt(Dn,e=>Math.asin(e)),YH={kernelName:Dn,backendName:"cpu",kernelFunc:VH};const HH=xt(Tn,e=>Math.asinh(e)),qH={kernelName:Tn,backendName:"cpu",kernelFunc:HH};const jH=xt(An,e=>Math.atan(e)),KH={kernelName:An,backendName:"cpu",kernelFunc:jH};const XH=xt(Ks,e=>Math.atanh(e)),JH={kernelName:Ks,backendName:"cpu",kernelFunc:XH};function GL(e,t,n,s,i,o){const a=i.strideHeight,c=i.strideWidth,h=i.dilationHeight,d=i.dilationWidth,m=i.effectiveFilterHeight,y=i.effectiveFilterWidth,b=i.padInfo.top,w=i.padInfo.left,L=o==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,T=Ze(i.outShape,n),A=T.values,N=i.outShape[1]*i.outShape[2]*i.outShape[3],E=i.outShape[2]*i.outShape[3],D=i.outShape[3];for(let F=0;Fye?ye=Ye:o==="avg"&&(pe+=Ye,Ie++)}if(isNaN(ye))break}const Se=ce+ue*D+$;A[Se]=o==="avg"?pe/Ie:ye}}}return T}function g0(e,t,n,s,i=!1,o=!1){const a=Ze(s.outShape,"int32"),c=s.strideHeight,h=s.strideWidth,d=s.dilationHeight,m=s.dilationWidth,y=s.effectiveFilterHeight,b=s.effectiveFilterWidth,w=s.padInfo.top,L=s.padInfo.left,T=Ze(t,n,e);for(let A=0;AJ&&(J=le,i?re=o?((A*s.inHeight+ce)*s.inWidth+he)*s.inChannels+N:(ce*s.inWidth+he)*s.inChannels+N:re=ue*b+de)}}a.set(re,A,E,B,N)}}return a}function ZH(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;xe(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;k(rn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Fn(i.shape,o,a,d,c,h);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=Qa({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),L=GL(b,i.shape,i.dtype,w,m,"avg");y=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return y}const QH={kernelName:Xs,backendName:"cpu",kernelFunc:ZH};function eq(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;xe([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Fn(a.shape,c,h,1,d),y=m.strideHeight,b=m.strideWidth,w=m.filterHeight,L=m.filterWidth,T=m.dilationHeight,A=m.dilationWidth,N=m.effectiveFilterHeight,E=m.effectiveFilterWidth,D=E-1-m.padInfo.left,F=N-1-m.padInfo.top,_=Ze(a.shape,"float32"),B=1/(w*L),$=n.data.get(i.dataId).values,H=Ze(i.shape,"float32",$);for(let q=0;q=m.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=m.outWidth||Math.floor(Ie)!==Ie)continue;const Se=H.get(q,ye,Ie,J);de+=Se}}_.set(de*B,q,re,ce,J)}return n.makeTensorInfo(_.shape,_.dtype,_.values)}const tq={kernelName:ua,backendName:"cpu",kernelFunc:eq};function nq(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,scale:o,offset:a,mean:c,variance:h}=t;k(c.shape.length===h.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(a==null||c.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(o==null||c.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),xe([i,c,h,o,a],"batchNorm");let{varianceEpsilon:d}=s;d==null&&(d=.001);const m=n.data.get(i.dataId).values,y=n.data.get(c.dataId).values,b=n.data.get(h.dataId).values,w=o?n.data.get(o.dataId).values:new Float32Array([1]),L=a?n.data.get(a.dataId).values:new Float32Array([0]),T=new 
Float32Array(m.length),A=L.length,N=w.length,E=b.length,D=y.length;let F=0,_=0,B=0,$=0;for(let H=0;H=A&&(F=0),_>=D&&(_=0),B>=N&&(B=0),$>=E&&($=0);return n.makeTensorInfo(i.shape,i.dtype,T)}const sq={kernelName:ol,backendName:"cpu",kernelFunc:nq};const iq=xt(Qc,(e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e`The new shape (${c}) has ${h} elements and the old shape (${i.shape}) has ${a} elements. The new shape and old shape must have the same number of elements.`),n.incRef(i.dataId);const d=n.data.get(i.dataId);if(d.complexTensorInfos!=null){const m=d.complexTensorInfos.real,y=d.complexTensorInfos.imag;m.shape=c,y.shape=c}return{dataId:i.dataId,shape:c,dtype:i.dtype}}const aq={kernelName:yl,backendName:"cpu",kernelFunc:Hr};function Hh(e){const{inputs:t,backend:n,attrs:s}=e,{axis:i}=s,o=ft(i,t[0].shape)[0];let a=Or(t.map(w=>w.shape),o);if(we(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(w=>we(w.shape)>0);if(c.length===1)return c[0];const h=c.map(w=>w.shape);if(Ky(h,o),c[0].dtype==="complex64"){const w=c.map(E=>Vh({inputs:{input:E},backend:n})),L=c.map(E=>jp({inputs:{input:E},backend:n})),T=Hh({inputs:w,backend:n,attrs:{axis:i}}),A=Hh({inputs:L,backend:n,attrs:{axis:i}}),N=li({inputs:{real:T,imag:A},backend:n});return w.forEach(E=>n.disposeIntermediateTensorInfo(E)),L.forEach(E=>n.disposeIntermediateTensorInfo(E)),n.disposeIntermediateTensorInfo(T),n.disposeIntermediateTensorInfo(A),N}const d=c.map(w=>{const L=we(w.shape.slice(o)),T=[-1,L];return Hr({inputs:{x:w},backend:n,attrs:{shape:T}})});a=Or(d.map(w=>w.shape),1);const m=bn(c[0].dtype,we(a));if(d[0].shape[0]===1){let w=0;d.forEach(L=>{const T=n.data.get(L.dataId).values,A=we(L.shape);m.set(T,w),w+=A})}else{let w=0;d.forEach(L=>{const T=n.data.get(L.dataId).values;let A=0;for(let N=0;Nw.shape),o),b=n.makeTensorInfo(y,t[0].dtype,m);return d.forEach(w=>n.disposeIntermediateTensorInfo(w)),b}const cq={kernelName:$u,backendName:"cpu",kernelFunc:Hh};const lq=xt(da,e=>Math.cos(e)),hq={kernelName:da,backendName:"cpu",kernelFunc:lq};const uq=xt(el,e=>Math.cosh(e)),dq={kernelName:el,backendName:"cpu",kernelFunc:uq};const pq={kernelName:Uu,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i}=e,{strides:o,pad:a,dilations:c}=n,h=t,d=h.data.get(s.dataId).values,m=s.shape.length,y=h.data.get(i.dataId).values,b=i.shape.length,{batchSize:w,inHeight:L,inWidth:T,inChannels:A,outHeight:N,outWidth:E,padInfo:D,strideHeight:F,strideWidth:_,filterHeight:B,filterWidth:$,dilationHeight:H,dilationWidth:q,outShape:J}=Td(s.shape,i.shape,o,a,"NHWC",c),re=we(J),ce=J.length,ue=lo(s.dtype,re);for(let de=0;de=0&&$e=0&&etEe&&(Ee=un)}}}const We=Js([de,le,pe,Se],ce,Ot(J));ue[We]=Ee}}}const he=h.write(Tr(ue,s.dtype),J,s.dtype);return{dataId:he,shape:J,dtype:s.dtype}}};const mq={kernelName:Mu,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=ys(s.shape,d.data.get(s.dataId).values),y=ys(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:T,outHeight:A,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:$,dilationWidth:H,outShape:q}=Td(s.shape,i.shape,a,c,"NHWC",h);k(o.rank===q.length,()=>`Error in ${Mu}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=ys(q,d.data.get(o.dataId).values),re=ay(i.shape,i.dtype);for(let ue=0;ue=0&&Oe=0&&YeIe&&(Ie=et,Se=We,Ee=$e)}}}re[Se][Ee][pe]+=J[ue][he][le][pe]}}}const 
ce=d.write(Tr(re,s.dtype),i.shape,i.dtype);return{dataId:ce,shape:i.shape,dtype:i.dtype}}};const fq={kernelName:Bu,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=ys(s.shape,d.data.get(s.dataId).values),y=ys(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:T,outHeight:A,outWidth:N,padInfo:E,strideHeight:D,strideWidth:F,filterHeight:_,filterWidth:B,dilationHeight:$,dilationWidth:H,outShape:q}=Td(s.shape,i.shape,a,c,"NHWC",h);k(o.rank===q.length,()=>`Error in ${Bu}, dy must have the same rank as output ${q.length}, but got ${o.rank}`);const J=ys(q,d.data.get(o.dataId).values),re=ay(s.shape,s.dtype);for(let ue=0;ue=0&&Oe=0&&YeIe&&(Ie=et,Se=Oe,Ee=Ye)}}}re[ue][Se][Ee][pe]+=J[ue][he][le][pe]}}}const ce=d.write(Tr(re,s.dtype),s.shape,s.dtype);return{dataId:ce,shape:s.shape,dtype:s.dtype}}};const gq=$o((e,t)=>e/t),yq=ec(pa,gq),VL={kernelName:pa,backendName:"cpu",kernelFunc:yq};const bq=xt(tl,e=>e>=0?e:Math.exp(e)-1),wq={kernelName:tl,backendName:"cpu",kernelFunc:bq};const Lq=Eb,Sq=Db,Iq=kb,xq=Fb,Tq=_b,Aq=Wb,vq=xt(nl,e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+Lq*n);return t*(1-((((Aq*s+Tq)*s+xq)*s+Iq)*s+Sq)*s*Math.exp(-n*n))}),Nq={kernelName:nl,backendName:"cpu",kernelFunc:vq};function y0(e,t,n){const s=e.shape,i=s[0],o=s[1],a=n.data.get(e.dataId),c=a.complexTensorInfos.real,h=a.complexTensorInfos.imag,d=[i,o],m=we(d),y=bn("float32",m),b=bn("float32",m);for(let A=0;A{const{image:s}=e,i=n,o=bn(s.dtype,we(s.shape)),[a,c,h,d]=s.shape,m=i.data.get(s.dataId).values;for(let b=0;b=0&&_Number.isFinite(e)?1:0,"bool"),$q={kernelName:cl,backendName:"cpu",kernelFunc:Wq};const Uq=xt(ll,e=>Math.abs(e)===Infinity?1:0,"bool"),Bq={kernelName:ll,backendName:"cpu",kernelFunc:Uq};const Mq=xt(hl,e=>Number.isNaN(e)?1:0,"bool"),Pq={kernelName:hl,backendName:"cpu",kernelFunc:Mq};const zq=xt(dl,e=>Math.log1p(e)),Gq={kernelName:dl,backendName:"cpu",kernelFunc:zq};const Vq=xt(zu,e=>e?0:1,"bool"),Yq={kernelName:zu,backendName:"cpu",kernelFunc:Vq};const Hq={kernelName:pl,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n;let c=s.shape;const h=c.length,d=ft(i,c);let m=d;const y=kn(m,h);let b=a.data.get(s.dataId).values;if(y!=null){const D=new Array(h);for(let F=0;F`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${d}'`);const m=Fn(i.shape,o,a,d,c,h);let y;if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))y=Qa({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=Ot(i.shape),L=GL(b,i.shape,i.dtype,w,m,"max");y=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return y}const jq={kernelName:ml,backendName:"cpu",kernelFunc:qq};function Kq(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;xe([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:y}=s,b=Fn(c.shape,h,d,1,m,y),w=n.data.get(c.dataId).values,L=Ze(b.outShape,c.dtype,g0(w,c.shape,c.dtype,b).values),T=b.strideHeight,A=b.strideWidth,N=b.dilationHeight,E=b.dilationWidth,D=b.effectiveFilterHeight,F=b.effectiveFilterWidth,_=F-1-b.padInfo.left,B=D-1-b.padInfo.top,$=Ze(c.shape,"float32"),H=n.data.get(i.dataId).values,q=Ze(i.shape,"float32",H);for(let J=0;J=b.outHeight||Math.floor(pe)!==pe)continue;for(let Ie=0;Ie=b.outWidth||Math.floor(Se)!==Se)continue;const Ee=D*F-1-L.get(J,pe,Se,re),We=ye*F+Ie,Oe=Ee===We?1:0;if(Oe===0)continue;const $e=q.get(J,pe,Se,re);le+=$e*Oe}}$.set(le,J,ce,ue,re)}return n.makeTensorInfo($.shape,$.dtype,$.values)}const Xq={kernelName:Gu,backendName:"cpu",kernelFunc:Kq};function Jq(e,t,n,s,i){const o=Ot(t),a=GL(e,t,n,o,i,"max"),c=g0(e,t,n,i,!0,s);return[a.values,c.values]}const Zq={kernelName:Vu,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;xe(s,"MaxPoolWithArgmax");const d=h.data.get(s.dataId).values,m=Fn(s.shape,i,o,[1,1],a),[y,b]=Jq(d,s.shape,s.dtype,c,m),w=h.write(y,m.outShape,s.dtype),L=h.write(b,m.outShape,s.dtype);return[{dataId:w,shape:m.outShape,dtype:s.dtype},{dataId:L,shape:m.outShape,dtype:"int32"}]}};const Qq=np,e4={kernelName:Hu,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t;xe(s,"NonMaxSuppressionPadded");const m=d.data.get(s.dataId).values,y=d.data.get(i.dataId).values,{selectedIndices:b,validOutputs:w}=Qq(m,y,o,a,c,h);return[b,w]}};const t4=sp,n4={kernelName:qu,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t;xe(s,"NonMaxSuppressionWithScore");const m=d.data.get(s.dataId).values,y=d.data.get(i.dataId).values,b=o,w=a,L=c,T=h,{selectedIndices:A,selectedScores:N}=t4(m,y,b,w,L,T);return[A,N]}};const s4=$o((e,t)=>e!==t?1:0),i4=ec(Yu,s4,null,"bool"),r4={kernelName:Yu,backendName:"cpu",kernelFunc:i4};function o4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,constantValue:a}=s;xe(i,"pad");const c=o.map((E,D)=>E[0]+i.shape[D]+E[1]),h=o.map(E=>E[0]),d=n.data.get(i.dataId).values,m=we(i.shape),y=i.shape.length,b=Ot(i.shape),w=we(c),L=c.length,T=Ot(c),A=bn(i.dtype,w);a!==0&&A.fill(a);for(let E=0;EB+h[$]),_=Js(F,L,T);A[_]=d[E]}const N=n.write(A,c,i.dtype);return{dataId:N,shape:c,dtype:i.dtype}}const b0={kernelName:ju,backendName:"cpu",kernelFunc:o4};const a4=xt(gl,e=>1/e),c4={kernelName:gl,backendName:"cpu",kernelFunc:a4};const l4={kernelName:ed,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=bn(s.dtype,we(s.shape)),[d,m,y,b]=s.shape,[w,L]=Cb(a,m,y),T=255,A=Math.sin(i),N=Math.cos(i),E=c.data.get(s.dataId).values;for(let F=0;F=0&&he=0&&de{const t=Math.floor(e);return 
e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2===0?t:t+1}),u4={kernelName:bl,backendName:"cpu",kernelFunc:h4};const d4=rp,p4=op,m4=xt(Ll,e=>e>=0?p4*e:d4*(Math.exp(e)-1)),f4={kernelName:Ll,backendName:"cpu",kernelFunc:m4};const g4=xt(xl,e=>1/(1+Math.exp(-e))),y4={kernelName:xl,backendName:"cpu",kernelFunc:g4};const b4=xt(Il,e=>e<0?-1:e>0?1:0),w4={kernelName:Il,backendName:"cpu",kernelFunc:b4};const L4=xt(ma,e=>Math.sin(e)),S4={kernelName:ma,backendName:"cpu",kernelFunc:L4};const I4=xt(Sl,e=>Math.sinh(e)),x4={kernelName:Sl,backendName:"cpu",kernelFunc:I4};const T4=11920928955078125e-23,w0=Math.log(T4)+2,A4=xt(Tl,e=>{const t=e>-w0,n=eMath.sqrt(e)),E4={kernelName:Al,backendName:"cpu",kernelFunc:O4};const D4={kernelName:Ju,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;xe(n,"square");const i=s.data.get(n.dataId).values,o=new Float32Array(i.length);for(let c=0;c{const n=e-t;return n*n}),F4=ec(fa,k4),_4={kernelName:fa,backendName:"cpu",kernelFunc:F4};const W4=xt(Rl,(e,t)=>{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}),$4={kernelName:Rl,backendName:"cpu",kernelFunc:W4};const U4=xt(ga,e=>Math.tan(e)),B4={kernelName:ga,backendName:"cpu",kernelFunc:U4};const M4=xt(Nl,e=>Math.tanh(e)),P4={kernelName:Nl,backendName:"cpu",kernelFunc:M4};function z4(e){const{inputs:t,attrs:n,backend:s}=e,{axis:i}=n,{x:o}=t;xe(o,"unique");const a=s.data.get(o.dataId).values,{outputValues:c,outputShape:h,indices:d}=f0(a,i,o.shape,o.dtype);return[s.makeTensorInfo(h,o.dtype,c),s.makeTensorInfo([d.length],"int32",d)]}const G4={kernelName:Zu,backendName:"cpu",kernelFunc:z4};const V4=[mH,PH,GH,LH,YH,qH,KH,JH,QH,tq,sq,bH,IH,rq,fH,cq,hq,dq,pq,fq,mq,VL,wq,Nq,TH,vH,Dq,kq,CH,gH,_q,oq,$q,Bq,Pq,OH,Gq,Yq,jq,Xq,Zq,Hq,DH,e4,n4,r4,b0,yH,c4,aq,l4,u4,FH,f4,y4,w4,S4,x4,_H,v4,R4,E4,D4,_4,$4,$H,B4,P4,N4,G4];for(const e of V4)nd(e);const Uo={},HL={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function Y4(e,t){Uo[e]=t}function _i(e){if(!(e in Uo)){const n=q4(e);if(n!==null)Uo[e]=n;else return console.log("Could not get context for WebGL version",e),null}const t=Uo[e];return t.isContextLost()?(delete Uo[e],_i(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),Uo[e])}function H4(e){if(typeof OffscreenCanvas!="undefined"&&e===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function q4(e){if(e!==1&&e!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=H4(e);return t.addEventListener("webglcontextlost",n=>{n.preventDefault(),delete Uo[e]},!1),e===1?t.getContext("webgl",HL)||t.getContext("experimental-webgl",HL):t.getContext("webgl2",HL)}var qh;(function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"})(qh||(qh={}));var As;(function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"})(As||(As={}));var Sn;(function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(Sn||(Sn={}));function jh(e,t){return[t,e]}function j4(e,t){return e*t}function cee(e,t){return[t*4,e]}function Kh(e){const 
t=we(e),n=Math.ceil(t/4);return sd(n)}function lee(e,t){if(e%t!==0)throw new Error(`unpackedSize (${e}) must be a multiple of ${t}`);return e/t}function hee(e,t,n){const s=e.length*n/4;if(t.length= ${s}`);let i=0;for(let o=0;oe.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function tj(e,t){const n=rr(e,()=>e.createShader(e.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}function nj(e,t){const n=rr(e,()=>e.createShader(e.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Re(e,()=>e.shaderSource(n,t)),Re(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw ij(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}const sj=/ERROR: [0-9]+:([0-9]+):/g;function ij(e,t){const n=sj.exec(t);if(n==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(e);return}const s=+n[1],i=e.split(` -`),o=i.length.toString().length+2,a=i.map((y,b)=>co((b+1).toString(),o)+y);let c=0;for(let y=0;y0&&Yi(n[0])){const i=n.map(o=>Wd(o));s=this.write(i,e,t)}else s=this.write(n,e,t);return{dataId:s,shape:e,dtype:t}}incRef(e){const t=this.data.get(e);t.refCount++}decRef(e){if(this.data.has(e)){const t=this.data.get(e);t.refCount--}}move(e,t,n,s){this.data.set(e,{values:t,dtype:s,refCount:1})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:t,complexTensorInfos:n}=this.data.get(e);if(t==="complex64"){const s=this.readSync(n.real.dataId),i=this.readSync(n.imag.dataId);return tr(s,i)}return this.data.get(e).values}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if(e.dtype==="string")try{n=t.map(s=>Kl(s))}catch(s){throw new Error("Failed to decode encoded string bytes into utf-8")}return wt(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return Ki().makeTensorFromDataId(s,t,n,this)}disposeData(e){if(this.data.has(e)){const{complexTensorInfos:t}=this.data.get(e);t!=null&&(this.disposeData(t.real.dataId),this.disposeData(t.imag.dataId)),this.data.delete(e)}}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.data.has(t)){const n=this.data.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}async time(e){const t=jn();e();const n=jn()-t;return{kernelMs:n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(e,t,n,s){Te(e,"stridedSlice");const i=jd(t,n,s);if(i.some(c=>c===0))return sn([],i);const o=wt(i,e.dtype),a=this.bufferSync(e);for(let c=0;ca[c]=e.shape[c]-1-a[c]),n.set(s.get(...a),...o)}return n.toTensor()}neg(e){return Te(e,"neg"),X(Ce(-1),e)}addN(e){Te(e,"addN");const t=e.map(i=>this.readSync(i.dataId)),n=wt(e[0].shape,e[0].dtype),s=n.values;for(let i=0;iMath.pow(n,s))}floorDiv(e,t){Te([e,t],"floorDiv");const n=(i,o)=>Math.floor(i/o),s="int32";return this.broadcastedBinaryOp(e,t,s,n)}sum(e,t){Te(e,"sum"),Kn("sum",t,e.rank);const[n,s]=An(e.shape,t),i=$n(e.dtype,"int32"),o=dt(n,i),a=P(s),c=this.readSync(o.dataId),h=this.readSync(e.dataId);for(let d=0;df&&(f=L,b=w)}c[d]=b}return o}cumsum(e,t,n,s){if(Te(e,"cumsum"),t!==e.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=$n(e.dtype,"int32"),o=dt(e.shape,i),a=this.readSync(o.dataId),c=this.readSync(e.dataId),h=e.shape[e.rank-1],d=s?(m,f)=>m+h-f-1:(m,f)=>m+f;for(let m=0;mn===s?1:0)}notEqual(e,t){return Te([e,t],"notEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n!==s?1:0)}less(e,t){return Te([e,t],"less"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>nn<=s?1:0)}greater(e,t){return Te([e,t],"greater"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>s?1:0)}greaterEqual(e,t){return Te([e,t],"greaterEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>=s?1:0)}logicalAnd(e,t){return Te([e,t],"logicalAnd"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n&&s)}logicalOr(e,t){return Te([e,t],"logicalOr"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n||s)}select(e,t,n){Te([e,t,n],"select");const s=this.readSync(e.dataId),i=this.readSync(t.dataId),o=this.readSync(n.dataId),a=dt(t.shape,$n(t.dtype,n.dtype)),c=this.readSync(a.dataId);let h=0;const d=e.rank===0||e.rank>1||t.rank===1?1:P(t.shape.slice(1));for(let m=0;mMath.min(n,s))}mod(e,t){return Te([e,t],"mod"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>{const i=n%s;return n<0&&s<0||n>=0&&s>=0?i:(i+s)%s})}maximum(e,t){return Te([e,t],"maximum"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.max(n,s))}all(e,t){Te(e,"all"),Kn("all",t,e.rank);const[n,s]=An(e.shape,t),i=dt(n,e.dtype),o=P(s),a=this.readSync(i.dataId),c=this.readSync(e.dataId);for(let h=0;h{const i=n-s;return i*i})}eluDer(e,t){Te([e,t],"eluDer");const n=new Float32Array(t.size),s=this.readSync(t.dataId),i=this.readSync(e.dataId);for(let o=0;o=1?n[o]=i[o]:n[o]=i[o]*(a+1)}return this.makeOutput(n,t.shape,"float32")}atan2(e,t){return Te([e,t],"atan2"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.atan2(n,s))}tile(e,t){return Te(e,"tile"),ZH(this.bufferSync(e),t)}gather(e,t,n){Te([e,t],"gather");const s=e.shape.slice(),i=this.readSync(t.dataId);s[n]=i.length;const o=wt(s,e.dtype),a=this.bufferSync(e);for(let c=0;cd*m),i=Oh(e.shape,t,s),o=Eh(i.length,t.length),a=Dh(e.shape,t,s),c=Zb(n,t.length),h=Qb(a,n,t.length);return Ye(e.reshape(i),o).reshape(a).slice(c,h)}pool3d(e,t,n){Te(e,"pool3d");const s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,f=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,x=n==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,v=this.readSync(e.dataId),N=wt(t.outShape,e.dtype),O=N.values,E=t.outShape[1]*t.outShape[2]*t.outShape[3]*t.outShape[4],k=t.outShape[2]*t.outShape[3]*t.outShape[4],F=t.outShape[3]*t.outShape[4],U=t.outShape[4];for(let 
$=0;$ze?ze=Ft:n==="avg"&&(ht+=Ft,it++),isNaN(ze))break}if(isNaN(ze))break}if(isNaN(ze))break}const rt=Ue+Z;O[rt]=n==="avg"?ht/it:ze}}}}return N.toTensor()}avgPool3d(e,t){return Te(e,"avgPool3d"),this.pool3d(e,t,"avg").toFloat()}avgPool3dBackprop(e,t,n){Te([e,t],"avgPool3dBackprop");const s=n.strideDepth,i=n.strideHeight,o=n.strideWidth,a=n.filterDepth,c=n.filterHeight,h=n.filterWidth,d=n.dilationDepth,m=n.dilationHeight,f=n.dilationWidth,b=n.effectiveFilterDepth,w=n.effectiveFilterHeight,L=n.effectiveFilterWidth,x=b-1-n.padInfo.front,v=L-1-n.padInfo.left,N=w-1-n.padInfo.top,O=wt(t.shape,"float32"),E=1/(a*c*h),k=this.bufferSync(e);for(let F=0;F=n.outDepth||Math.floor(me)!==me)continue;for(let ce=0;ce=n.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=n.outWidth||Math.floor(we)!==we)continue;const Se=k.get(F,me,ye,we,U);he+=Se}}}O.set(he*E,F,$,Y,j,U)}return O.toTensor()}maxPool3d(e,t){return Te(e,"maxPool3d"),this.pool3d(e,t,"max").toFloat()}maxPool3dPositions(e,t){const n=wt(t.outShape,"int32"),s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,f=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,x=this.bufferSync(e);for(let v=0;v=ue&&(ue=Oe,me=ye*m*f+we*m+xe)}}}n.set(me,v,O,U,Z,N)}}}return n.toTensor()}maxPool3dBackprop(e,t,n,s){Te([t,n],"maxPool3dBackprop");const i=this.maxPool3dPositions(t,s),o=s.strideDepth,a=s.strideHeight,c=s.strideWidth,h=s.dilationDepth,d=s.dilationHeight,m=s.dilationWidth,f=s.effectiveFilterDepth,b=s.effectiveFilterHeight,w=s.effectiveFilterWidth,L=f-1-s.padInfo.front,x=w-1-s.padInfo.left,v=b-1-s.padInfo.top,N=wt(t.shape,"float32"),O=this.bufferSync(i),E=this.bufferSync(e);for(let k=0;k=s.outDepth||Math.floor(ue)!==ue)continue;for(let me=0;me=s.outHeight||Math.floor(ce)!==ce)continue;for(let ye=0;ye=s.outWidth||Math.floor(pe)!==pe)continue;const we=f*b*w-1-O.get(k,ue,ce,pe,F),Se=he*b*w+me*w+ye,xe=we===Se?1:0;if(xe===0)continue;const Oe=E.get(k,ue,ce,pe,F);de+=Oe*xe}}}N.set(de,k,U,$,Y,F)}return N.toTensor()}resizeBilinear(e,t,n,s){Te(e,"resizeBilinear");const[i,o,a,c]=e.shape,h=this.readSync(e.dataId),d=new Float32Array(P([i,t,n,c])),m=[s&&t>1?o-1:o,s&&n>1?a-1:a],f=[s&&t>1?t-1:t,s&&n>1?n-1:n];let b=0;const w=m[0]/f[0],L=m[1]/f[1];for(let x=0;x1?i-1:i,n&&h>1?o-1:o],f=[n&&c>1?c-1:c,n&&h>1?h-1:h],b=m[0]/f[0],w=m[1]/f[1],L=this.readSync(e.dataId);let x=0;for(let v=0;v1?o-1:o,s&&n>1?a-1:a],f=[s&&t>1?t-1:t,s&&n>1?n-1:n],b=m[0]/f[0],w=m[1]/f[1];let L=0;for(let x=0;x1?i-1:i,n&&h>1?o-1:o],b=[n&&c>1?c-1:c,n&&h>1?h-1:h],w=f[0]/b[0],L=f[1]/b[1],x=1/w,v=1/L,N=Math.ceil(x)*2+2,O=Math.ceil(v)*2+2;for(let E=0;E=c)continue;const ye=k+ce*e.strides[1],pe=ce*w,we=Math.min(i-1,n?Math.round(pe):Math.floor(pe));if(F!==we)continue;for(let Se=0;Se=h)continue;const Oe=ye+xe*e.strides[2],Ne=xe*L,De=Math.min(o-1,n?Math.round(Ne):Math.floor(Ne));j===De&&(ue+=m[Oe+he])}}d[Z+he]=ue}}}}return Ka(d,t.shape,t.dtype)}localResponseNormalization4D(e,t,n,s,i){Te(e,"localResponseNormalization4D");const o=e.shape[3],a=o-1,c=this.readSync(e.dataId),h=e.size,d=new Float32Array(h);function m(f){const b=f%o;let w=f-b+Math.max(0,b-t);const L=f-b+Math.min(b+t,a);let x=0;for(;w<=L;w++){const v=c[w];x+=v*v}return x}for(let f=0;f=0&&o[a]`Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${n}`),A(t>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=e.shape[1],o=e.shape[2],a=e.shape[3],c=i*t,h=o*t,d=a/(t*t),m=this.readSync(e.dataId),f=new Float32Array(s*c*h*d);let b=0;for(let w=0;wx[E]=0);const v=f.locToIndex(x),N=L.slice(-t.rank);d.forEach(E=>N[E]=0);const O=b.locToIndex(N);m[w]=s(a[v],c[O])}}return o.toTensor()}split(e,t,n){return JH(e,t,n)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(e,t,n,s,i,o){const[a,c,h,d]=e.shape,m=t.shape[0],[f,b]=s,w=wt([m,f,b,d],"float32"),L=this.readSync(t.dataId),x=this.readSync(n.dataId),v=this.readSync(e.dataId),N=e.strides,O=w.strides;for(let E=0;E=a)continue;const Z=f>1?($-F)*(c-1)/(f-1):0,ie=b>1?(Y-U)*(h-1)/(b-1):0;for(let de=0;de1?F*(c-1)+de*Z:.5*(F+$)*(c-1);if(he<0||he>c-1){for(let ue=0;ue1?U*(h-1)+ye*ie:.5*(U+Y)*(h-1);if(pe<0||pe>h-1){for(let Oe=0;Oe1?U*(h-1)+ue*ie:.5*(U+Y)*(h-1);if(me<0||me>h-1){for(let pe=0;pe=e.size/a)throw new Error(`Invalid indices: ${b} does not index into ${e.shape}`);for(let L=0;L=s/i)throw new Error(`Invalid indices: ${x} does not index into ${n}`);for(let N=0;N{const{x:t}=e.inputs,n=e.backend;let s=new Float32Array(P(t.shape));if(t.dtype!=="complex64"){const i=n.data.get(t.dataId).values;s=b0(i)}else{const i=n.data.get(t.dataId),o=i.complexTensorInfos.real,a=i.complexTensorInfos.imag,c=n.data.get(o.dataId).values,h=n.data.get(a.dataId).values;for(let d=0;d{const a=nt(t,n),c=a.length,h=je(a),d=P(a),m=bt(o,d),f=t.length,b=n.length,w=je(t),L=je(n),x=Ro(t,a),v=Ro(n,a);if(x.length+v.length===0)for(let N=0;NE[$]=0);const k=_s(E,f,w),F=O.slice(-b);v.forEach($=>F[$]=0);const U=_s(F,b,L);m[N]=e(s[k],i[U])}return[m,a]}}function ci(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,c=n.makeTensorInfo(s.shape,"complex64"),h=n.data.get(c.dataId);return h.complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",o),imag:n.makeTensorInfo(i.shape,"float32",a)},c}const iq={kernelName:rd,backendName:"cpu",kernelFunc:ci};function Go(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const rq={kernelName:xl,backendName:"cpu",kernelFunc:Go};function ru(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.data.get(s.dataId).complexTensorInfos.real,o=n.data.get(i.dataId).values;return n.makeTensorInfo(i.shape,i.dtype,o)}const oq={kernelName:Td,backendName:"cpu",kernelFunc:ru};function ou(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return Go({inputs:{x:i},backend:n});const a=dt(i.shape),c=ou({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),h=ci({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),h}if(i.dtype==="complex64"){const a=ru({inputs:{input:i},backend:n}),c=ou({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!ba(i.dtype,o)){const a=Go({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32"){const a=n.data.get(i.dataId).values,c=Int32Array.from(a);return n.makeTensorInfo(i.shape,"int32",c)}if(o==="bool"){const a=n.data.get(i.dataId).values,c=Or([0],i.dtype),[h,d]=Zr((m,f)=>m!==f?1:0)(i.shape,[],a,c,"bool");return n.makeTensorInfo(d,"bool",h)}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const aq={kernelName:Sa,backendName:"cpu",kernelFunc:ou};function hc(e,t,n,s){return n==null?({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;Te([a,c],e);const 
d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,f=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,f);return h.makeTensorInfo(w,f,b)}:({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;if(a.dtype==="complex64"||c.dtype==="complex64"){const d=ou({inputs:{x:a},backend:h,attrs:{dtype:"complex64"}}),m=h.data.get(d.dataId),f=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,w=h.data.get(f.dataId).values,L=h.data.get(b.dataId).values,x=ou({inputs:{x:c},backend:h,attrs:{dtype:"complex64"}}),v=h.data.get(x.dataId),N=v.complexTensorInfos.real,O=v.complexTensorInfos.imag,E=h.data.get(N.dataId).values,k=h.data.get(O.dataId).values,[F,U,$]=n(a.shape,c.shape,w,L,E,k),Y=h.makeTensorInfo($,"float32",F),j=h.makeTensorInfo($,"float32",U),Z=ci({inputs:{real:Y,imag:j},backend:h});return h.disposeIntermediateTensorInfo(d),h.disposeIntermediateTensorInfo(x),h.disposeIntermediateTensorInfo(Y),h.disposeIntermediateTensorInfo(j),Z}else{const d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,f=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,f);return h.makeTensorInfo(w,f,b)}}}function cS(e){return(t,n,s,i,o,a)=>{const c=nt(t,n),h=P(c),d=c.length,m=je(c),f=bt("float32",h),b=bt("float32",h),w=Ro(t,c),L=Ro(n,c),x=tr(s,i),v=tr(o,a),N=t.length,O=je(t),E=n.length,k=je(n);if(w.length+L.length===0)for(let F=0;F$[de]=0);const Y=_s($,N,O),j=U.slice(-E);L.forEach(de=>j[de]=0);const Z=_s(j,E,k),ie=e(x[Y*2],x[Y*2+1],v[Z*2],v[Z*2+1]);f[F]=ie.real,b[F]=ie.imag}return[f,b,c]}}const w0=Zr((e,t)=>e+t),cq=cS((e,t,n,s)=>({real:e+n,imag:t+s})),au=hc(wo,w0,cq),lq={kernelName:wo,backendName:"cpu",kernelFunc:au};function uc(e){return(t,n,s)=>{const i=bt(n,t.length);for(let o=0;o{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=P(a.shape),m=n||a.dtype,f=ws(m,d);for(let b=0;b{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=n||a.dtype,m=t(h,d,i);return c.makeTensorInfo(a.shape,d,m)}}const L0=uc(e=>Math.ceil(e)),hq=dc(pl,L0),uq={kernelName:pl,backendName:"cpu",kernelFunc:hq};const S0=uc(e=>Math.exp(e)),dq=dc(wl,S0),pq={kernelName:wl,backendName:"cpu",kernelFunc:dq};const I0=uc(e=>Math.expm1(e)),mq=dc(Ll,I0),fq={kernelName:Ll,backendName:"cpu",kernelFunc:mq};const x0=uc(e=>Math.floor(e)),gq=dc(Sl,x0),yq={kernelName:Sl,backendName:"cpu",kernelFunc:gq};const T0=uc(e=>Math.log(e)),bq=dc(Nl,T0),wq={kernelName:Nl,backendName:"cpu",kernelFunc:bq};function A0(e,t,n,s){const i=bt(s,P(n));for(let o=0;oc&&(c=d)}i[o]=c}return i}const v0=Zr((e,t)=>e*t),Lq=cS((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n})),N0=hc(Ta,v0,Lq),Sq={kernelName:Ta,backendName:"cpu",kernelFunc:N0};const C0=Zr((e,t)=>e!==t?1:0),Iq=hc(Dl,C0,null,"bool"),xq={kernelName:Dl,backendName:"cpu",kernelFunc:Iq};const R0=uc(e=>1/Math.sqrt(e)),Tq=dc(Ul,R0),Aq={kernelName:Ul,backendName:"cpu",kernelFunc:Tq};function O0(e,t,n,s,i){const o=eb(s,t,n),a=P(n),c=je(s);if(o){const d=tb(t,c);return e.subarray(d,d+a)}const h=bt(i,a);for(let d=0;dx+t[v]),L=_s(w,s.length,c);h[d]=e[L]}return h}function lS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{begin:o,size:a}=s;Te(i,"slice");const[c,h]=Kd(i,o,a);Qy(i,c,h);const d=n.data.get(i.dataId).values,m=O0(d,c,h,i.shape,i.dtype);return n.makeTensorInfo(h,i.dtype,m)}const vq={kernelName:Ad,backendName:"cpu",kernelFunc:lS};const E0=Zr((e,t)=>{const n=e-t;return 
n*n}),Nq=hc(va,E0),Cq={kernelName:va,backendName:"cpu",kernelFunc:Nq};const D0=Zr((e,t)=>e-t),Rq=cS((e,t,n,s)=>({real:e-n,imag:t-s})),k0=hc(Na,D0,Rq),Oq={kernelName:Na,backendName:"cpu",kernelFunc:k0};function hS(e,t,n,s,i){const o=t.length,a=P(t),c=je(t),h=je(i),d=bt(n,P(i));for(let m=0;m{for(let v=0;vnew tq,1);const _0=xt(yl,e=>e>=0?e:Math.exp(e)-1),kq={kernelName:yl,backendName:"cpu",kernelFunc:_0};const Fq=Zr((e,t)=>e<0?t*e:e);function W0(e){const{inputs:t,backend:n}=e,{x:s,alpha:i}=t;Te([s,i],"prelu");const o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,[c,h]=Fq(s.shape,i.shape,o,a,s.dtype);return n.makeTensorInfo(h,s.dtype,c)}const _q={kernelName:xd,backendName:"cpu",kernelFunc:W0};const $0=xt(Fl,e=>Math.max(0,e)),Wq={kernelName:Fl,backendName:"cpu",kernelFunc:$0};const U0=xt(Wl,e=>Math.min(Math.max(0,e),6)),$q={kernelName:Wl,backendName:"cpu",kernelFunc:U0};function uS(e,t,n,s){if(n==="linear")return Go({inputs:{x:t},backend:e});if(n==="relu")return $0({inputs:{x:t},backend:e});if(n==="elu")return _0({inputs:{x:t},backend:e});if(n==="relu6")return U0({inputs:{x:t},backend:e});if(n==="prelu")return W0({inputs:{x:t,alpha:s},backend:e});throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}function Di(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{shape:o}=s,a=P(i.shape),c=Vt(o,a),h=P(c);A(a===h,()=>`The new shape (${c}) has ${h} elements and the old shape (${i.shape}) has ${a} elements. The new shape and old shape must have the same number of elements.`),n.incRef(i.dataId);const d=n.data.get(i.dataId);if(d.complexTensorInfos!=null){const m=d.complexTensorInfos.real,f=d.complexTensorInfos.imag;m.shape=c,f.shape=c}return{dataId:i.dataId,shape:c,dtype:i.dtype}}const Uq={kernelName:_l,backendName:"cpu",kernelFunc:Di};function B0(e){const{inputs:t,backend:n,attrs:s}=e,{a:i,b:o}=t,{transposeA:a,transposeB:c}=s;Te([i,o],"matMul");const h=i.shape.length,d=o.shape.length,m=a?i.shape[h-2]:i.shape[h-1],f=c?o.shape[d-1]:o.shape[d-2],b=a?i.shape[h-1]:i.shape[h-2],w=c?o.shape[d-2]:o.shape[d-1],L=i.shape.slice(0,-2),x=o.shape.slice(0,-2),v=P(L),N=P(x),O=v===N||v===1||N===1;A(h>=2&&d>=2&&O,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. 
Got input batch dimensions of (${L}) and (${x}).`);const E=v>N?i.shape.slice(0,-2):o.shape.slice(0,-2),k=E.concat([b,w]);A(m===f,()=>`Error in matMul: inner shapes (${m}) and (${f}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${a} and transposeB=${c} must match.`);const F=a?[v,m,b]:[v,b,m],U=c?[N,w,f]:[N,f,w],$=Di({inputs:{x:i},backend:n,attrs:{shape:F}}),Y=Di({inputs:{x:o},backend:n,attrs:{shape:U}}),j=a?$.shape[1]:$.shape[2],Z=a?$.shape[2]:$.shape[1],ie=c?Y.shape[1]:Y.shape[2],de=Math.max(v,N),he=n.data.get($.dataId).values,ue=n.data.get(Y.dataId).values,me=je($.shape),ce=je(Y.shape),[ye,pe,we]=a?[me[0],1,me[1]]:[me[0],me[1],1],[Se,xe,Oe]=c?[1,ce[1],ce[0]]:[ce[1],1,ce[0]],Ne=Z*ie,De=wt([de,Z,ie],$.dtype),Ue=De.values,ze=n.blockSize;for(let ht=0;htMath.acos(e)),Vq={kernelName:ol,backendName:"cpu",kernelFunc:zq};const Gq=xt(al,e=>Math.acosh(e)),Yq={kernelName:al,backendName:"cpu",kernelFunc:Gq};const Hq=xt(cl,e=>Math.asin(e)),qq={kernelName:cl,backendName:"cpu",kernelFunc:Hq};const jq=xt(ll,e=>Math.asinh(e)),Kq={kernelName:ll,backendName:"cpu",kernelFunc:jq};const Xq=xt(hl,e=>Math.atan(e)),Jq={kernelName:hl,backendName:"cpu",kernelFunc:Xq};const Zq=xt(ul,e=>Math.atanh(e)),Qq={kernelName:ul,backendName:"cpu",kernelFunc:Zq};function dS(e,t,n,s,i,o){const a=i.strideHeight,c=i.strideWidth,h=i.dilationHeight,d=i.dilationWidth,m=i.effectiveFilterHeight,f=i.effectiveFilterWidth,b=i.padInfo.top,w=i.padInfo.left,L=o==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,x=wt(i.outShape,n),v=x.values,N=i.outShape[1]*i.outShape[2]*i.outShape[3],O=i.outShape[2]*i.outShape[3],E=i.outShape[3];for(let k=0;kye?ye=Ue:o==="avg"&&(pe+=Ue,we++)}if(isNaN(ye))break}const Se=de+he*E+$;v[Se]=o==="avg"?pe/we:ye}}}return x}function M0(e,t,n,s,i=!1,o=!1){const a=wt(s.outShape,"int32"),c=s.strideHeight,h=s.strideWidth,d=s.dilationHeight,m=s.dilationWidth,f=s.effectiveFilterHeight,b=s.effectiveFilterWidth,w=s.padInfo.top,L=s.padInfo.left,x=wt(t,n,e);for(let v=0;vZ&&(Z=ce,i?ie=o?((v*s.inHeight+de)*s.inWidth+ue)*s.inChannels+N:(de*s.inWidth+ue)*s.inChannels+N:ie=he*b+me)}}a.set(ie,v,O,U,N)}}return a}function e4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;Te(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;A(cn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);let f;if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))f=Go({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=je(i.shape),L=dS(b,i.shape,i.dtype,w,m,"avg");f=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return f}const t4={kernelName:dl,backendName:"cpu",kernelFunc:e4};function n4(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;Te([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Un(a.shape,c,h,1,d),f=m.strideHeight,b=m.strideWidth,w=m.filterHeight,L=m.filterWidth,x=m.dilationHeight,v=m.dilationWidth,N=m.effectiveFilterHeight,O=m.effectiveFilterWidth,E=O-1-m.padInfo.left,k=N-1-m.padInfo.top,F=wt(a.shape,"float32"),U=1/(w*L),$=n.data.get(i.dataId).values,Y=wt(i.shape,"float32",$);for(let j=0;j=m.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=m.outWidth||Math.floor(we)!==we)continue;const Se=Y.get(j,ye,we,Z);me+=Se}}F.set(me*U,j,ie,de,Z)}return n.makeTensorInfo(F.shape,F.dtype,F.values)}const s4={kernelName:sd,backendName:"cpu",kernelFunc:n4};function i4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,scale:o,offset:a,mean:c,variance:h}=t;A(c.shape.length===h.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(a==null||c.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(o==null||c.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),Te([i,c,h,o,a],"batchNorm");let{varianceEpsilon:d}=s;d==null&&(d=.001);const m=n.data.get(i.dataId).values,f=n.data.get(c.dataId).values,b=n.data.get(h.dataId).values,w=o?n.data.get(o.dataId).values:new Float32Array([1]),L=a?n.data.get(a.dataId).values:new Float32Array([0]),x=new Float32Array(m.length),v=L.length,N=w.length,O=b.length,E=f.length;let k=0,F=0,U=0,$=0;for(let Y=0;Y=v&&(k=0),F>=E&&(F=0),U>=N&&(U=0),$>=O&&($=0);return n.makeTensorInfo(i.shape,i.dtype,x)}const r4={kernelName:Il,backendName:"cpu",kernelFunc:i4};const o4=xt(ml,(e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:ew.shape),o);if(P(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(w=>P(w.shape)>0);if(c.length===1)return c[0];const h=c.map(w=>w.shape);if(np(h,o),c[0].dtype==="complex64"){const w=c.map(O=>ru({inputs:{input:O},backend:n})),L=c.map(O=>Am({inputs:{input:O},backend:n})),x=cu({inputs:w,backend:n,attrs:{axis:o}}),v=cu({inputs:L,backend:n,attrs:{axis:o}}),N=ci({inputs:{real:x,imag:v},backend:n});return w.forEach(O=>n.disposeIntermediateTensorInfo(O)),L.forEach(O=>n.disposeIntermediateTensorInfo(O)),n.disposeIntermediateTensorInfo(x),n.disposeIntermediateTensorInfo(v),N}const d=c.map(w=>{const L=P(w.shape.slice(o)),x=[-1,L];return Di({inputs:{x:w},backend:n,attrs:{shape:x}})});a=Xi(d.map(w=>w.shape),1);const m=bt(c[0].dtype,P(a));if(d[0].shape[0]===1){let w=0;d.forEach(L=>{const x=n.data.get(L.dataId).values,v=P(L.shape);m.set(x,w),w+=v})}else{let w=0;d.forEach(L=>{const x=n.data.get(L.dataId).values;let v=0;for(let N=0;Nw.shape),o),b=n.makeTensorInfo(f,t[0].dtype,m);return d.forEach(w=>n.disposeIntermediateTensorInfo(w)),b}const l4={kernelName:fl,backendName:"cpu",kernelFunc:cu};function P0(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,filter:o}=t,{strides:a,pad:c,dataFormat:h,dilations:d,dimRoundingMode:m}=s;Te([i,o],"conv2d");const 
f=Wr(h),b=kn(i.shape,o.shape,a,d,c,m,!1,f),w=b.filterHeight,L=b.filterWidth,x=b.dilationHeight,v=b.dilationWidth,N=b.padInfo.left,O=b.padInfo.top,E=b.dataFormat==="channelsLast",k=new an(b.outShape,i.dtype),F=je(i.shape),U=je(o.shape),$=F[0],Y=E?F[1]:F[2],j=E?F[2]:1,Z=E?1:F[1],ie=k.strides[0],de=E?k.strides[1]:k.strides[2],he=E?k.strides[2]:1,ue=E?1:k.strides[1],me=n.data.get(i.dataId).values,ce=n.data.get(o.dataId).values,ye=k.values;for(let pe=0;pe=b.inHeight)continue;const ze=De*U[0],ht=we+Ue*Y;for(let it=0;it=b.inWidth)continue;const rn=ze+ut*U[1],Ut=ht+Dt*j;let kt=rn;for(let Ft=0;Ft=d.inDepth)continue;const pe=ce*j[0],we=ie+ye*Y[1];for(let Se=0;Se=d.inHeight)continue;const Ue=pe+Ne*j[1],ze=we+De*Y[2];for(let ht=0;ht=d.inWidth)continue;const Dt=Ue+mt*j[2],rn=ze+ut*d.inChannels;let Ut=Dt;for(let kt=0;ktMath.cos(e)),I4={kernelName:Ia,backendName:"cpu",kernelFunc:S4};const x4=xt(gl,e=>Math.cosh(e)),T4={kernelName:gl,backendName:"cpu",kernelFunc:x4};function z0(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,filter:o}=t,{strides:a,pad:c,dilations:h,dimRoundingMode:d}=s;Te([i,o],"depthwiseConv2DNative");const m=je(i.shape),f=je(o.shape);let b=h;b==null&&(b=[1,1]),A(cn(a,b),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${a} and dilations '${b}'`);const w=kn(i.shape,o.shape,a,b,c,d,!0),{filterHeight:L,filterWidth:x,dilationHeight:v,dilationWidth:N,padInfo:O}=w,E=O.left,k=O.top,F=w.outChannels/w.inChannels,U=new an(w.outShape,i.dtype),$=n.data.get(i.dataId).values,Y=n.data.get(o.dataId).values,j=U.values;for(let Z=0;Z=w.inHeight)continue;const pe=ce*f[0],we=ie+ye*m[1];for(let Se=0;Se=w.inWidth)continue;const Ue=pe+Ne*f[1],ze=we+De*w.inChannels;let ht=xe,it=Ue;for(let rt=0;rt{const{x:s,filter:i}=e,{strides:o,pad:a,dilations:c}=n,h=t,d=h.data.get(s.dataId).values,m=s.shape.length,f=h.data.get(i.dataId).values,b=i.shape.length,{batchSize:w,inHeight:L,inWidth:x,inChannels:v,outHeight:N,outWidth:O,padInfo:E,strideHeight:k,strideWidth:F,filterHeight:U,filterWidth:$,dilationHeight:Y,dilationWidth:j,outShape:Z}=ep(s.shape,i.shape,o,a,"NHWC",c),ie=P(Z),de=Z.length,he=ws(s.dtype,ie);for(let me=0;me=0&&De=0&&zexe&&(xe=rt)}}}const Oe=_s([me,ce,pe,Se],de,je(Z));he[Oe]=xe}}}const ue=h.write(Or(he,s.dtype),Z,s.dtype);return{dataId:ue,shape:Z,dtype:s.dtype}}};const E4={kernelName:dd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=Ls(s.shape,d.data.get(s.dataId).values),f=Ls(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:x,outHeight:v,outWidth:N,padInfo:O,strideHeight:E,strideWidth:k,filterHeight:F,filterWidth:U,dilationHeight:$,dilationWidth:Y,outShape:j}=ep(s.shape,i.shape,a,c,"NHWC",h);A(o.rank===j.length,()=>`Error in ${dd}, dy must have the same rank as output ${j.length}, but got ${o.rank}`);const Z=Ls(j,d.data.get(o.dataId).values),ie=Pg(i.shape,i.dtype);for(let he=0;he=0&&Ne=0&&Uewe&&(we=ze,Se=Oe,xe=De)}}}ie[Se][xe][pe]+=Z[he][ue][ce][pe]}}}const de=d.write(Or(ie,s.dtype),i.shape,i.dtype);return{dataId:de,shape:i.shape,dtype:i.dtype}}};const 
D4={kernelName:ud,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=Ls(s.shape,d.data.get(s.dataId).values),f=Ls(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:x,outHeight:v,outWidth:N,padInfo:O,strideHeight:E,strideWidth:k,filterHeight:F,filterWidth:U,dilationHeight:$,dilationWidth:Y,outShape:j}=ep(s.shape,i.shape,a,c,"NHWC",h);A(o.rank===j.length,()=>`Error in ${ud}, dy must have the same rank as output ${j.length}, but got ${o.rank}`);const Z=Ls(j,d.data.get(o.dataId).values),ie=Pg(s.shape,s.dtype);for(let he=0;he=0&&Ne=0&&Uewe&&(we=ze,Se=Ne,xe=Ue)}}}ie[he][Se][xe][pe]+=Z[he][ue][ce][pe]}}}const de=d.write(Or(ie,s.dtype),s.shape,s.dtype);return{dataId:de,shape:s.shape,dtype:s.dtype}}};const k4=Zr((e,t)=>e/t),F4=hc(xa,k4),pS={kernelName:xa,backendName:"cpu",kernelFunc:F4};const _4=ew,W4=tw,$4=nw,U4=sw,B4=iw,M4=rw,P4=xt(bl,e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+_4*n);return t*(1-((((M4*s+B4)*s+U4)*s+$4)*s+W4)*s*Math.exp(-n*n))}),z4={kernelName:bl,backendName:"cpu",kernelFunc:P4};function V0(e,t,n){const s=e.shape,i=s[0],o=s[1],a=n.data.get(e.dataId),c=a.complexTensorInfos.real,h=a.complexTensorInfos.imag,d=[i,o],m=P(d),f=bt("float32",m),b=bt("float32",m);for(let v=0;v{const{image:s}=e,i=n,o=bt(s.dtype,P(s.shape)),[a,c,h,d]=s.shape,m=i.data.get(s.dataId).values;for(let b=0;b=0&&FNumber.isFinite(e)?1:0,"bool"),rj={kernelName:Tl,backendName:"cpu",kernelFunc:ij};const oj=xt(Al,e=>Math.abs(e)===Infinity?1:0,"bool"),aj={kernelName:Al,backendName:"cpu",kernelFunc:oj};const cj=xt(vl,e=>Number.isNaN(e)?1:0,"bool"),lj={kernelName:vl,backendName:"cpu",kernelFunc:cj};const hj=xt(Cl,e=>Math.log1p(e)),uj={kernelName:Cl,backendName:"cpu",kernelFunc:hj};const dj=xt(yd,e=>e?0:1,"bool"),pj={kernelName:yd,backendName:"cpu",kernelFunc:dj};const mj={kernelName:Rl,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n;let c=s.shape;const h=c.length,d=qe(i,c);let m=d;const f=Xn(m,h);let b=a.data.get(s.dataId).values;if(f!=null){const E=new Array(h);for(let k=0;k`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);let f;if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))f=Go({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=je(i.shape),L=dS(b,i.shape,i.dtype,w,m,"max");f=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return f}const gj={kernelName:Ol,backendName:"cpu",kernelFunc:fj};function yj(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;Te([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:f}=s,b=Un(c.shape,h,d,1,m,f),w=n.data.get(c.dataId).values,L=wt(b.outShape,c.dtype,M0(w,c.shape,c.dtype,b).values),x=b.strideHeight,v=b.strideWidth,N=b.dilationHeight,O=b.dilationWidth,E=b.effectiveFilterHeight,k=b.effectiveFilterWidth,F=k-1-b.padInfo.left,U=E-1-b.padInfo.top,$=wt(c.shape,"float32"),Y=n.data.get(i.dataId).values,j=wt(i.shape,"float32",Y);for(let Z=0;Z=b.outHeight||Math.floor(pe)!==pe)continue;for(let we=0;we=b.outWidth||Math.floor(Se)!==Se)continue;const xe=E*k-1-L.get(Z,pe,Se,ie),Oe=ye*k+we,Ne=xe===Oe?1:0;if(Ne===0)continue;const De=j.get(Z,pe,Se,ie);ce+=De*Ne}}$.set(ce,Z,de,he,ie)}return n.makeTensorInfo($.shape,$.dtype,$.values)}const bj={kernelName:bd,backendName:"cpu",kernelFunc:yj};function wj(e,t,n,s,i){const o=je(t),a=dS(e,t,n,o,i,"max"),c=M0(e,t,n,i,!0,s);return[a.values,c.values]}const Lj={kernelName:wd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;Te(s,"MaxPoolWithArgmax");const d=h.data.get(s.dataId).values,m=Un(s.shape,i,o,[1,1],a),[f,b]=wj(d,s.shape,s.dtype,c,m),w=h.write(f,m.outShape,s.dtype),L=h.write(b,m.outShape,s.dtype);return[{dataId:w,shape:m.outShape,dtype:s.dtype},{dataId:L,shape:m.outShape,dtype:"int32"}]}};function Sj(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,mode:a}=s;Te(i,"mirrorPad");const c=o.map((E,k)=>E[0]+i.shape[k]+E[1]),h=o.map(E=>E[0]),d=o.map((E,k)=>E[0]+i.shape[k]),m=a==="reflect"?0:1,f=n.data.get(i.dataId).values,b=i.shape.length,w=je(i.shape),L=P(c),x=c.length,v=je(c),N=bt(i.dtype,L);for(let E=0;E=d[U]&&(k[U]=(d[U]-1)*2-k[U]+m);k=k.map((U,$)=>U-h[$]);const F=_s(k,b,w);N[E]=f[F]}const O=n.write(N,c,i.dtype);return{dataId:O,shape:c,dtype:i.dtype}}const Ij={kernelName:El,backendName:"cpu",kernelFunc:Sj};const xj=kp,Tj={kernelName:Ld,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t;Te(s,"NonMaxSuppressionPadded");const m=d.data.get(s.dataId).values,f=d.data.get(i.dataId).values,{selectedIndices:b,validOutputs:w}=xj(m,f,o,a,c,h);return[b,w]}};const Aj=Fp,vj={kernelName:Sd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t;Te(s,"NonMaxSuppressionWithScore");const m=d.data.get(s.dataId).values,f=d.data.get(i.dataId).values,b=o,w=a,L=c,x=h,{selectedIndices:v,selectedScores:N}=Aj(m,f,b,w,L,x);return[v,N]}};function Nj(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,constantValue:a}=s;Te(i,"pad");const c=o.map((O,E)=>O[0]+i.shape[E]+O[1]),h=o.map(O=>O[0]),d=n.data.get(i.dataId).values,m=P(i.shape),f=i.shape.length,b=je(i.shape),w=P(c),L=c.length,x=je(c),v=bt(i.dtype,w);a!==0&&v.fill(a);for(let O=0;OU+h[$]),F=_s(k,L,x);v[F]=d[O]}const N=n.write(v,c,i.dtype);return{dataId:N,shape:c,dtype:i.dtype}}const G0={kernelName:Id,backendName:"cpu",kernelFunc:Nj};const 
Cj=xt(kl,e=>1/e),Rj={kernelName:kl,backendName:"cpu",kernelFunc:Cj};const Oj={kernelName:Od,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=bt(s.dtype,P(s.shape)),[d,m,f,b]=s.shape,[w,L]=Jb(a,m,f),x=255,v=Math.sin(i),N=Math.cos(i),O=c.data.get(s.dataId).values;for(let k=0;k=0&&ue=0&&me{const t=Math.floor(e);return e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2===0?t:t+1}),Dj={kernelName:$l,backendName:"cpu",kernelFunc:Ej};const kj=Wp,Fj=$p,_j=xt(Bl,e=>e>=0?Fj*e:kj*(Math.exp(e)-1)),Wj={kernelName:Bl,backendName:"cpu",kernelFunc:_j};const $j=xt(zl,e=>1/(1+Math.exp(-e))),Uj={kernelName:zl,backendName:"cpu",kernelFunc:$j};const Bj=xt(Pl,e=>e<0?-1:e>0?1:0),Mj={kernelName:Pl,backendName:"cpu",kernelFunc:Bj};const Pj=xt(Aa,e=>Math.sin(e)),zj={kernelName:Aa,backendName:"cpu",kernelFunc:Pj};const Vj=xt(Ml,e=>Math.sinh(e)),Gj={kernelName:Ml,backendName:"cpu",kernelFunc:Vj};const Yj=11920928955078125e-23,Y0=Math.log(Yj)+2,Hj=xt(Vl,e=>{const t=e>-Y0,n=eMath.sqrt(e)),Zj={kernelName:Gl,backendName:"cpu",kernelFunc:Jj};const Qj={kernelName:Nd,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;Te(n,"square");const i=s.data.get(n.dataId).values,o=new Float32Array(i.length);for(let c=0;c{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}),tK={kernelName:ql,backendName:"cpu",kernelFunc:eK};const nK=xt(Ca,e=>Math.tan(e)),sK={kernelName:Ca,backendName:"cpu",kernelFunc:nK};const iK=xt(Yl,e=>Math.tanh(e)),rK={kernelName:Yl,backendName:"cpu",kernelFunc:iK};function oK(e){const{inputs:t,attrs:n,backend:s}=e,{axis:i}=n,{x:o}=t;Te(o,"unique");const a=s.data.get(o.dataId).values,{outputValues:c,outputShape:h,indices:d}=F0(a,i,o.shape,o.dtype);return[s.makeTensorInfo(h,o.dtype,c),s.makeTensorInfo([d.length],"int32",d)]}const aK={kernelName:Cd,backendName:"cpu",kernelFunc:oK};const cK=[Pq,sq,Vq,Yq,lq,qq,Kq,Jq,Qq,t4,s4,Bq,r4,aq,uq,a4,iq,l4,d4,m4,h4,b4,L4,g4,I4,T4,A4,N4,R4,O4,D4,E4,pS,kq,z4,pq,fq,q4,K4,J4,yq,Q4,tj,rq,sj,c4,rj,aj,lj,wq,uj,pj,gj,bj,Lj,mj,Ij,Sq,Tj,vj,xq,G0,_q,oq,Rj,Wq,$q,Uq,Oj,Dj,Aq,Wj,Uj,Mj,zj,Gj,vq,qj,Xj,Zj,Qj,Cq,tK,Oq,sK,rK,jj,aK];for(const e of cK)_d(e);const Qr={},fS={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function jte(e){delete Qr[e]}function lK(e,t){Qr[e]=t}function ki(e){if(!(e in Qr)){const n=uK(e);if(n!==null)Qr[e]=n;else return console.log("Could not get context for WebGL version",e),null}const t=Qr[e];return t.isContextLost()?(delete Qr[e],ki(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),Qr[e])}function hK(e){if(typeof OffscreenCanvas!="undefined"&&e===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function uK(e){if(e!==1&&e!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=hK(e);return t.addEventListener("webglcontextlost",n=>{n.preventDefault(),delete Qr[e]},!1),e===1?t.getContext("webgl",fS)||t.getContext("experimental-webgl",fS):t.getContext("webgl2",fS)}var lu;(function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"})(lu||(lu={}));var Ns;(function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"})(Ns||(Ns={}));var 
Cn;(function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(Cn||(Cn={}));function hu(e,t){return[t,e]}function dK(e,t){return e*t}function Kte(e,t){return[t*4,e]}function uu(e){const t=P(e),n=Math.ceil(t/4);return Ve(n)}function Xte(e,t){if(e%t!==0)throw new Error(`unpackedSize (${e}) must be a multiple of ${t}`);return e/t}function Jte(e,t,n){const s=e.length*n/4;if(t.length= ${s}`);let i=0;for(let o=0;oe.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function wK(e,t){const n=cr(e,()=>e.createShader(e.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Ee(e,()=>e.shaderSource(n,t)),Ee(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}function LK(e,t){const n=cr(e,()=>e.createShader(e.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Ee(e,()=>e.shaderSource(n,t)),Ee(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw IK(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}const SK=/ERROR: [0-9]+:([0-9]+):/g;function IK(e,t){const n=SK.exec(t);if(n==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(e);return}const s=+n[1],i=e.split(` +`),o=i.length.toString().length+2,a=i.map((f,b)=>pt((b+1).toString(),o)+f);let c=0;for(let f=0;fe.createProgram(),"Unable to create WebGLProgram.")}function oj(e,t){if(Re(e,()=>e.linkProgram(t)),e.getProgramParameter(t,e.LINK_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function jL(e,t){if(Re(e,()=>e.validateProgram(t)),e.getProgramParameter(t,e.VALIDATE_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function aj(e,t){const n=rr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function cj(e,t){const n=rr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n)),Re(e,()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function uee(){return C().getNumber("WEBGL_VERSION")===2?1:4}function lj(e){return rr(e,()=>e.createTexture(),"Unable to create WebGLTexture.")}function hj(e,t){const n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0){const s=`[${e}x${t}]`;throw new Error("Requested texture size "+s+" is invalid.")}if(e>n||t>n){const s=`[${e}x${t}]`,i=`[${n}x${n}]`;throw new Error("Requested texture size "+s+" greater than WebGL maximum on this browser / GPU "+i+".")}}function uj(e){return rr(e,()=>e.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function S0(e,t,n,s,i,o,a){const c=e.getAttribLocation(t,n);return c===-1?!1:(Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,s)),Re(e,()=>e.vertexAttribPointer(c,i,e.FLOAT,!1,o,a)),Re(e,()=>e.enableVertexAttribArray(c)),!0)}function dj(e,t,n){x0(e,n),Re(e,()=>e.activeTexture(e.TEXTURE0+n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,t))}function dee(e,t){x0(e,t),Re(e,()=>e.activeTexture(e.TEXTURE0+t)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function pj(e,t,n){return rr(e,()=>e.getUniformLocation(t,n),'uniform "'+n+'" not present in program.')}function mj(e,t,n){return e.getUniformLocation(t,n)}function 
fj(e,t,n,s){Re(e,()=>dj(e,t,s)),Re(e,()=>e.uniform1i(n,s))}function pee(e){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.viewport(0,0,e.canvas.width,e.canvas.height)),Re(e,()=>e.scissor(0,0,e.canvas.width,e.canvas.height))}function KL(e,t,n){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,n)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0))}function I0(e,t){Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,t)),Re(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0))}function Xp(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+gj(e,t))}function gj(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function rr(e,t,n){const s=Re(e,()=>t());if(s==null)throw new Error(n);return s}function x0(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(sn){const i=`[gl.TEXTURE0, gl.TEXTURE${n}]`;throw new Error(`textureUnit must be in ${i}.`)}}function ic(e,t=2){return we(e.slice(0,e.length-t))}function rc(e){if(e.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function XL(e){let t=[1,1,1];const n=e.length===0||e.length===1&&e[0]===1;return n||(t=[ic(e),...rc(e)]),t}function yj(e,t=!1){let n=C().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n=n*2,e=e.map((i,o)=>o>=e.length-2?ny(e[o]):e[o]),e.length===1&&(e=[2,e[0]])),e.length!==2){const i=Sr(e);e=i.newShape}let s=we(e);if(e.length<=1&&s<=n)return[1,s];if(e.length===2&&e[0]<=n&&e[1]<=n)return e;if(e.length===3&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(e.length===3&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(e.length===4&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(e.length===4&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const i=ic(e);let o=2,a=2;return e.length&&([o,a]=rc(e)),s=i*(o/2)*(a/2),sd(s).map(c=>c*2)}return sd(s)}function Jp(e){return e%2===0}function Zp(e,t){if(e=e.slice(-2),t=t.slice(-2),ot(e,t))return!0;if(!e.length||!t.length)return!0;if(e[0]===0||e[1]===0||t[0]===0||t[1]===0)return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(Jp(n)&&Jp(s)&&(e[0]===1||t[0]===1))return!0}return e[1]===t[1]&&Jp(e[0])&&Jp(t[0])}let Qp,em;function bj(e){if(Qp==null){const t=_i(e);Qp=t.getParameter(t.MAX_TEXTURE_SIZE)}return Qp}function mee(){Qp=null}function fee(){em=null}function wj(e){if(em==null){const t=_i(e);em=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,em)}function Lj(e){if(e===0)return 0;let t;const n=_i(e);return Ps(n,"EXT_disjoint_timer_query_webgl2")&&e===2?t=2:Ps(n,"EXT_disjoint_timer_query")?t=1:t=0,t}function Ps(e,t){const n=e.getExtension(t);return n!=null}function T0(e){try{const t=_i(e);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function Sj(e){if(e===0)return!1;const t=_i(e);if(e===1){if(!Ps(t,"OES_texture_float"))return!1}else if(!Ps(t,"EXT_color_buffer_float"))return!1;const n=JL(t);return n}function Ij(e){if(e===0)return!1;const 
t=_i(e);if(e===1){if(!Ps(t,"OES_texture_float"))return!1;if(!Ps(t,"WEBGL_color_buffer_float"))return!1}else{if(Ps(t,"EXT_color_buffer_float"))return JL(t);const s="EXT_color_buffer_half_float";if(Ps(t,s)){const i=t.getExtension(s);return xj(t,i)}return!1}const n=JL(t);return n}function JL(e){const t=qL(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n);const s=1,i=1;e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,s,i,0,t.textureFormatFloat,t.textureTypeFloat,null);const o=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,o),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(o),a}function xj(e,t){const n=qL(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s);const i=1,o=1;e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,i,o,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const c=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),c}function Tj(e){if(e!==2)return!1;const t=_i(e),n=t.fenceSync!=null;return n}function Xh(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&k(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}const Ge=C();Ge.registerFlag("HAS_WEBGL",()=>Ge.getNumber("WEBGL_VERSION")>0),Ge.registerFlag("WEBGL_VERSION",()=>T0(2)?2:T0(1)?1:0),Ge.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),Ge.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>Ge.get("WEBGL_VERSION")===2),Ge.registerFlag("WEBGL_CPU_FORWARD",()=>!0),Ge.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),Ge.registerFlag("WEBGL_PACK",()=>Ge.getBool("HAS_WEBGL")),Ge.registerFlag("WEBGL_PACK_NORMALIZATION",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_CLIP",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),Ge.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_REDUCE",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_LAZILY_UNPACK",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_CONV_IM2COL",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>bj(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>wj(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{const e=Ge.getNumber("WEBGL_VERSION");return 
e===0?0:Lj(e)}),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>Ge.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!Yx()),Ge.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>Sj(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>Ge.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:Ge.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),Ge.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>Ij(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_FENCE_API_ENABLED",()=>Tj(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{const e=Ge.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return e?4:0}),Ge.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,e=>{if(e<0&&e!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${e}.`)});const{simpleAbsImpl:Aj,addImpl:vj,ceilImpl:Nj,expImpl:Cj,expm1Impl:Rj,floorImpl:Oj,logImpl:Ej,maxImpl:Dj,multiplyImpl:kj,rsqrtImpl:Fj,sliceImpl:_j,subImpl:Wj,transposeImpl:A0,uniqueImpl:$j}=UH;class Uj{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`float v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` +`)[0]),console.log(`%c ${pt(d[0],c)}`,"border:1px solid red; background-color:#e3d2d2; color:#a61717"),console.log(m.join(` +`))}function xK(e){return cr(e,()=>e.createProgram(),"Unable to create WebGLProgram.")}function TK(e,t){if(Ee(e,()=>e.linkProgram(t)),e.getProgramParameter(t,e.LINK_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function yS(e,t){if(Ee(e,()=>e.validateProgram(t)),e.getProgramParameter(t,e.VALIDATE_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function AK(e,t){const n=cr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n)),Ee(e,()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function vK(e,t){const n=cr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Ee(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n)),Ee(e,()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function Zte(){return oe().getNumber("WEBGL_VERSION")===2?1:4}function NK(e){return cr(e,()=>e.createTexture(),"Unable to create WebGLTexture.")}function CK(e,t){const n=oe().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0){const s=`[${e}x${t}]`;throw new Error("Requested texture size "+s+" is invalid.")}if(e>n||t>n){const s=`[${e}x${t}]`,i=`[${n}x${n}]`;throw new Error("Requested texture size "+s+" greater than WebGL maximum on this browser / GPU "+i+".")}}function RK(e){return cr(e,()=>e.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function q0(e,t,n,s,i,o,a){const c=e.getAttribLocation(t,n);return c===-1?!1:(Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,s)),Ee(e,()=>e.vertexAttribPointer(c,i,e.FLOAT,!1,o,a)),Ee(e,()=>e.enableVertexAttribArray(c)),!0)}function OK(e,t,n){K0(e,n),Ee(e,()=>e.activeTexture(e.TEXTURE0+n)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t))}function Qte(e,t){K0(e,t),Ee(e,()=>e.activeTexture(e.TEXTURE0+t)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function EK(e,t,n){return cr(e,()=>e.getUniformLocation(t,n),'uniform "'+n+'" not present in program.')}function DK(e,t,n){return e.getUniformLocation(t,n)}function kK(e,t,n,s){Ee(e,()=>OK(e,t,s)),Ee(e,()=>e.uniform1i(n,s))}function 
ene(e){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Ee(e,()=>e.viewport(0,0,e.canvas.width,e.canvas.height)),Ee(e,()=>e.scissor(0,0,e.canvas.width,e.canvas.height))}function bS(e,t,n){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,n)),Ee(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0))}function j0(e,t){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,t)),Ee(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0))}function Nm(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+FK(e,t))}function FK(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function cr(e,t,n){const s=Ee(e,()=>t());if(s==null)throw new Error(n);return s}function K0(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(sn){const i=`[gl.TEXTURE0, gl.TEXTURE${n}]`;throw new Error(`textureUnit must be in ${i}.`)}}function mc(e,t=2){return P(e.slice(0,e.length-t))}function fc(e){if(e.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function wS(e){let t=[1,1,1];const n=e.length===0||e.length===1&&e[0]===1;return n||(t=[mc(e),...fc(e)]),t}function _K(e,t=!1){let n=oe().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n=n*2,e=e.map((i,o)=>o>=e.length-2?T(e[o]):e[o]),e.length===1&&(e=[2,e[0]])),e.length!==2){const i=ln(e);e=i.newShape}let s=P(e);if(e.length<=1&&s<=n)return[1,s];if(e.length===2&&e[0]<=n&&e[1]<=n)return e;if(e.length===3&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(e.length===3&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(e.length===4&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(e.length===4&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const i=mc(e);let o=2,a=2;return e.length&&([o,a]=fc(e)),s=i*(o/2)*(a/2),Ve(s).map(c=>c*2)}return Ve(s)}function Cm(e){return e%2===0}function Rm(e,t){if(e=e.slice(-2),t=t.slice(-2),ae(e,t))return!0;if(!e.length||!t.length)return!0;if(e[0]===0||e[1]===0||t[0]===0||t[1]===0)return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(Cm(n)&&Cm(s)&&(e[0]===1||t[0]===1))return!0}return e[1]===t[1]&&Cm(e[0])&&Cm(t[0])}let Om,Em;function WK(e){if(Om==null){const t=ki(e);Om=t.getParameter(t.MAX_TEXTURE_SIZE)}return Om}function tne(){Om=null}function nne(){Em=null}function $K(e){if(Em==null){const t=ki(e);Em=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,Em)}function UK(e){if(e===0)return 0;let t;const n=ki(e);return Vs(n,"EXT_disjoint_timer_query_webgl2")&&e===2?t=2:Vs(n,"EXT_disjoint_timer_query")?t=1:t=0,t}function Vs(e,t){const n=e.getExtension(t);return n!=null}function X0(e){try{const t=ki(e);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function BK(e){if(e===0)return!1;const t=ki(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1}else if(!Vs(t,"EXT_color_buffer_float"))return!1;const n=LS(t);return n}function MK(e){if(e===0)return!1;const t=ki(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1;if(!Vs(t,"WEBGL_color_buffer_float"))return!1}else{if(Vs(t,"EXT_color_buffer_float"))return LS(t);const 
s="EXT_color_buffer_half_float";if(Vs(t,s)){const i=t.getExtension(s);return PK(t,i)}return!1}const n=LS(t);return n}function LS(e){const t=gS(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n);const s=1,i=1;e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,s,i,0,t.textureFormatFloat,t.textureTypeFloat,null);const o=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,o),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(o),a}function PK(e,t){const n=gS(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s);const i=1,o=1;e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,i,o,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const c=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),c}function zK(e){if(e!==2)return!1;const t=ki(e),n=t.fenceSync!=null;return n}function du(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&A(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}const Ge=oe();Ge.registerFlag("HAS_WEBGL",()=>Ge.getNumber("WEBGL_VERSION")>0),Ge.registerFlag("WEBGL_VERSION",()=>X0(2)?2:X0(1)?1:0),Ge.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),Ge.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>Ge.get("WEBGL_VERSION")===2),Ge.registerFlag("WEBGL_CPU_FORWARD",()=>!0),Ge.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),Ge.registerFlag("WEBGL_PACK",()=>Ge.getBool("HAS_WEBGL")),Ge.registerFlag("WEBGL_PACK_NORMALIZATION",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_CLIP",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),Ge.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_REDUCE",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_LAZILY_UNPACK",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_CONV_IM2COL",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>WK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>$K(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{const e=Ge.getNumber("WEBGL_VERSION");return e===0?0:UK(e)}),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>Ge.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!hT()),Ge.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>BK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>Ge.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:Ge.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),Ge.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>MK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_FENCE_API_ENABLED",()=>zK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{const e=Ge.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return 
e?4:0}),Ge.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,e=>{if(e<0&&e!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${e}.`)});const{simpleAbsImpl:VK,addImpl:GK,ceilImpl:YK,expImpl:HK,expm1Impl:qK,floorImpl:jK,logImpl:KK,maxImpl:XK,multiplyImpl:JK,rsqrtImpl:ZK,sliceImpl:QK,subImpl:e5,transposeImpl:SS,uniqueImpl:t5}=Eq;class n5{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`float v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` void main() { ${n.join(` `)} @@ -41,7 +41,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi float result = ${s}; setOutput(result); } - `}}class Bj{constructor(e,t){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`vec4 v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` + `}}class s5{constructor(e,t){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`vec4 v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` void main() { ${n.join(` `)} @@ -49,7 +49,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi vec4 result = ${s}; setOutput(result); } - `}}class Mj{constructor(e,t,n){this.variableNames=["A"];const{windowSize:s,batchSize:i,outSize:o}=e;n||this.variableNames.push("bestIndicesA"),this.outputShape=[i,o];const a=t==="max"?">":"<",c=n?"inOffset + i;":"round(getBestIndicesA(batch, inOffset + i));";this.userCode=` + `}}class i5{constructor(e,t,n){this.variableNames=["A"];const{windowSize:s,batchSize:i,outSize:o}=e;n||this.variableNames.push("bestIndicesA"),this.outputShape=[i,o];const a=t==="max"?">":"<",c=n?"inOffset + i;":"round(getBestIndicesA(batch, inOffset + i));";this.userCode=` void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; @@ -69,7 +69,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi } setOutput(float(bestIndex)); } - `}}function v0(e,t){return["x","y","z","w","u","v"].slice(0,t).map(n=>`${e}.${n}`)}function cs(e,t){return t===1?[e]:v0(e,t)}function Pj(e,t){if(e===1)return"rc";let n="";for(let s=0;s`${e}.${n}`)}function Mn(e,t){return t===1?[e]:J0(e,t)}function r5(e,t){if(e===1)return"rc";let n="";for(let s=0;s 0.0 || val < 0.0) ? false : val != 0.0; } @@ -114,11 +114,11 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi ivec4 round(vec4 value) { return ivec4(floor(value + vec4(0.5))); } - `),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:i,output:o,defineOutput:a,defineSpecialNaN:c,defineSpecialInf:h,defineRound:d}}function Bo(e,t,n="index"){const s=Ot(t);return s.map((i,o)=>{const a=`int ${e[o]} = ${n} / ${i}`,c=o===s.length-1?`int ${e[o+1]} = ${n} - ${e[o]} * ${i}`:`index -= ${e[o]} * ${i}`;return`${a}; ${c};`}).join("")}function tm(e){return e.length===1?`${e[0]}`:`vec${e.length}(${e.join(",")})`}function gee(e,t){if(e.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${e.length} and ${t.length}`);const n=[],s=Math.floor(e.length/4),i=e.length%4;for(let o=0;o`float(${c})`),a=a.map(c=>`float(${c})`)),n.push(`${tm(o)}, ${tm(a)}`)}return n.map((o,a)=>`dot(${o})`).join("+")}function ZL(e){const t=Ot(e).map(n=>n.toString());return` + `),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:i,output:o,defineOutput:a,defineSpecialNaN:c,defineSpecialInf:h,defineRound:d}}function Yo(e,t,n="index"){const s=je(t);return s.map((i,o)=>{const a=`int ${e[o]} = ${n} / ${i}`,c=o===s.length-1?`int ${e[o+1]} = ${n} - ${e[o]} * ${i}`:`index -= ${e[o]} * ${i}`;return`${a}; ${c};`}).join("")}function Dm(e){return e.length===1?`${e[0]}`:`vec${e.length}(${e.join(",")})`}function sne(e,t){if(e.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${e.length} and ${t.length}`);const n=[],s=Math.floor(e.length/4),i=e.length%4;for(let o=0;o`float(${c})`),a=a.map(c=>`float(${c})`)),n.push(`${Dm(o)}, ${Dm(a)}`)}return n.map((o,a)=>`dot(${o})`).join("+")}function IS(e){const t=je(e).map(n=>n.toString());return` int getFlatIndex(ivec3 coords) { return coords.x * ${t[0]} + coords.y * ${t[1]} + coords.z; } -`}const N0=` +`}const Z0=` const float FLOAT_MAX = 1.70141184e38; const float FLOAT_MIN = 1.17549435e-38; @@ -157,22 +157,22 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi return c / 255.0; } -`;const{getBroadcastDims:C0}=Bb;function zj(e,t,n,s){const i=[];e.forEach(L=>{const T=we(L.shapeInfo.logicalShape);L.shapeInfo.isUniform?i.push(`uniform float ${L.name}${T>1?`[${T}]`:""};`):(i.push(`uniform sampler2D ${L.name};`),i.push(`uniform int offset${L.name};`))});const o=i.join(` -`),a=e.map(L=>Gj(L,t,s)).join(` -`),c=t.texShape,h=Wn(),d=Hj(h);let m,y,b=Kj(h);t.isPacked?(m=Vj(t.logicalShape,c),y=jj(h)):(m=Yj(t.logicalShape,c),y=qj(h)),s&&(b+=Qj);const w=[b,d,y,o,m,a,n].join(` -`);return w}function oc(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return uK(e);case 1:return pK(e);case 2:return fK(e);case 3:return yK(e);case 4:return wK(e);case 5:return LK(e);case 6:return SK(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function R0(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return hK(e);case 1:return dK(e);case 2:return mK(e);case 3:return gK(e);default:return bK(e)}}function Gj(e,t,n=!1){let s="";n?s+=R0(e):s+=oc(e);const i=e.shapeInfo.logicalShape,o=t.logicalShape;return i.length<=o.length&&(n?s+=IK(e,t):s+=xK(e,t)),s}function Vj(e,t){switch(e.length){case 0:return O0();case 1:return eK(e,t);case 2:return cK(e,t);case 3:return nK(e,t);default:return iK(e,t)}}function Yj(e,t){switch(e.length){case 0:return O0();case 1:return tK(e,t);case 2:return lK(e,t);case 3:return sK(e,t);case 4:return rK(e,t);case 5:return oK(e,t);case 6:return aK(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}}function Hj(e){return` +`;const{getBroadcastDims:Q0}=cw;function o5(e,t,n,s){const i=[];e.forEach(L=>{const x=P(L.shapeInfo.logicalShape);L.shapeInfo.isUniform?i.push(`uniform float ${L.name}${x>1?`[${x}]`:""};`):(i.push(`uniform sampler2D ${L.name};`),i.push(`uniform int offset${L.name};`))});const o=i.join(` +`),a=e.map(L=>a5(L,t,s)).join(` +`),c=t.texShape,h=Pn(),d=h5(h);let m,f,b=p5(h);t.isPacked?(m=c5(t.logicalShape,c),f=d5(h)):(m=l5(t.logicalShape,c),f=u5(h)),s&&(b+=y5);const w=[b,d,f,o,m,a,n].join(` +`);return w}function gc(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return R5(e);case 1:return E5(e);case 2:return k5(e);case 3:return _5(e);case 4:return $5(e);case 5:return U5(e);case 6:return B5(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function eC(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return C5(e);case 1:return O5(e);case 2:return D5(e);case 3:return F5(e);default:return W5(e)}}function a5(e,t,n=!1){let s="";n?s+=eC(e):s+=gc(e);const i=e.shapeInfo.logicalShape,o=t.logicalShape;return i.length<=o.length&&(n?s+=M5(e,t):s+=P5(e,t)),s}function c5(e,t){switch(e.length){case 0:return tC();case 1:return b5(e,t);case 2:return v5(e,t);case 3:return L5(e,t);default:return I5(e,t)}}function l5(e,t){switch(e.length){case 0:return tC();case 1:return w5(e,t);case 2:return N5(e,t);case 3:return S5(e,t);case 4:return x5(e,t);case 5:return T5(e,t);case 6:return A5(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}}function h5(e){return` float sampleTexture(sampler2D textureSampler, vec2 uv) { return ${e.texture2D}(textureSampler, uv).r; } - `}function qj(e){return` + `}function u5(e){return` void setOutput(float val) { ${e.output} = vec4(val, 0, 0, 0); } - `}function jj(e){return` + `}function d5(e){return` void setOutput(vec4 val) { ${e.output} = val; } - `}function Kj(e){const t=`${e.version} + `}function p5(e){const t=`${e.version} precision highp float; precision highp 
int; precision highp sampler2D; @@ -227,10 +227,10 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi return fract((p3.x + p3.y) * p3.z); } - ${Xj} - ${Jj} - ${Zj} - `;return t}const Xj=` + ${m5} + ${f5} + ${g5} + `;return t}const m5=` vec2 uvFromFlat(int texNumR, int texNumC, int index) { int texR = index / texNumC; int texC = index - texR * texNumC; @@ -242,7 +242,7 @@ vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,Jj=` +`,f5=` vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, int texNumC, int row, int col) { int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); @@ -250,7 +250,7 @@ vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,Zj=` +`,g5=` vec2 packedUVfrom3D(int texNumR, int texNumC, int texelsInBatch, int texelsInLogicalRow, int b, int row, int col) { @@ -259,7 +259,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, int texC = index - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,Qj=` +`,y5=` float getChannel(vec4 frag, vec2 innerDims) { vec2 modCoord = mod(innerDims, 2.); return modCoord.x == 0. ? @@ -270,11 +270,11 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, float modCoord = mod(float(dim), 2.); return modCoord == 0. ? frag.r : frag.g; } -`;function O0(){return` +`;function tC(){return` int getOutputCoords() { return 0; } - `}function eK(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];return n[0]===1?` + `}function b5(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];return n[0]===1?` int getOutputCoords() { return 2 * int(resultUV.x * ${n[1]}.0); } @@ -288,7 +288,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, vec2(${n[0]}, ${n[1]})); return 2 * (resTexRC.x * ${n[1]} + resTexRC.y); } - `}function tK(e,t){return t[0]===1?` + `}function w5(e,t){return t[0]===1?` int getOutputCoords() { return int(resultUV.x * ${t[1]}.0); } @@ -302,7 +302,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, vec2(${t[0]}, ${t[1]})); return resTexRC.x * ${t[1]} + resTexRC.y; } - `}function nK(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[2]/2),i=s*Math.ceil(e[1]/2);return` + `}function L5(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[2]/2),i=s*Math.ceil(e[1]/2);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${n[0]}, ${n[1]})); @@ -316,7 +316,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, return ivec3(b, r, c); } - `}function sK(e,t){const n=Bo(["r","c","d"],e);return` + `}function S5(e,t){const n=Yo(["r","c","d"],e);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); @@ -324,7 +324,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, ${n} return ivec3(r, c, d); } - `}function iK(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[e.length-1]/2),i=s*Math.ceil(e[e.length-2]/2);let o=i,a="",c="b, r, c";for(let h=2;h=1?m="coords = 0;":m=c.map(E=>`coords.${y[E+d]} = 0;`).join(` -`);let b="";a<2&&o>0?b="coords":b=e.shapeInfo.logicalShape.map((E,D)=>`coords.${y[D+d]}`).join(", ");let w="return outputValue;";const L=we(e.shapeInfo.logicalShape),T=L===1,A=we(t.logicalShape),N=A===1;if(o===1&&!T&&!N)w=` + `}function M5(e,t){const 
n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),i="get"+s+"AtOutCoords",o=e.shapeInfo.logicalShape.length,a=t.logicalShape.length,c=Q0(e.shapeInfo.logicalShape,t.logicalShape),h=Rt(a),d=a-o;let m;const f=["x","y","z","w","u","v"];o===0?m="":a<2&&c.length>=1?m="coords = 0;":m=c.map(O=>`coords.${f[O+d]} = 0;`).join(` +`);let b="";a<2&&o>0?b="coords":b=e.shapeInfo.logicalShape.map((O,E)=>`coords.${f[E+d]}`).join(", ");let w="return outputValue;";const L=P(e.shapeInfo.logicalShape),x=L===1,v=P(t.logicalShape),N=v===1;if(o===1&&!x&&!N)w=` return vec4(outputValue.xy, outputValue.xy); - `;else if(T&&!N)a===1?w=` + `;else if(x&&!N)a===1?w=` return vec4(outputValue.x, outputValue.x, 0., 0.); `:w=` return vec4(outputValue.x); - `;else if(c.length){const E=o-2,D=o-1;c.indexOf(E)>-1&&c.indexOf(D)>-1?w="return vec4(outputValue.x);":c.indexOf(E)>-1?w="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":c.indexOf(D)>-1&&(w="return vec4(outputValue.xx, outputValue.zz);")}return` + `;else if(c.length){const O=o-2,E=o-1;c.indexOf(O)>-1&&c.indexOf(E)>-1?w="return vec4(outputValue.x);":c.indexOf(O)>-1?w="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":c.indexOf(E)>-1&&(w="return vec4(outputValue.xx, outputValue.zz);")}return` vec4 ${i}() { ${h} coords = getOutputCoords(); ${m} vec4 outputValue = get${s}(${b}); ${w} } - `}function xK(e,t){const n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),i="get"+s+"AtOutCoords",o=t.texShape,a=e.shapeInfo.texShape,c=e.shapeInfo.logicalShape.length,h=t.logicalShape.length;if(!e.shapeInfo.isUniform&&c===h&&e.shapeInfo.flatOffset==null&&ot(a,o))return` + `}function P5(e,t){const n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),i="get"+s+"AtOutCoords",o=t.texShape,a=e.shapeInfo.texShape,c=e.shapeInfo.logicalShape.length,h=t.logicalShape.length;if(!e.shapeInfo.isUniform&&c===h&&e.shapeInfo.flatOffset==null&&ae(a,o))return` float ${i}() { return sampleTexture(${n}, resultUV); } - `;const d=Et(h),m=C0(e.shapeInfo.logicalShape,t.logicalShape),y=h-c;let b;const w=["x","y","z","w","u","v"];c===0?b="":h<2&&m.length>=1?b="coords = 0;":b=m.map(T=>`coords.${w[T+y]} = 0;`).join(` -`);let L="";return h<2&&c>0?L="coords":L=e.shapeInfo.logicalShape.map((T,A)=>`coords.${w[A+y]}`).join(", "),` + `;const d=Rt(h),m=Q0(e.shapeInfo.logicalShape,t.logicalShape),f=h-c;let b;const w=["x","y","z","w","u","v"];c===0?b="":h<2&&m.length>=1?b="coords = 0;":b=m.map(x=>`coords.${w[x+f]} = 0;`).join(` +`);let L="";return h<2&&c>0?L="coords":L=e.shapeInfo.logicalShape.map((x,v)=>`coords.${w[v+f]}`).join(", "),` float ${i}() { ${d} coords = getOutputCoords(); ${b} return get${s}(${L}); } - `}function Et(e){if(e<=1)return"int";if(e===2)return"ivec2";if(e===3)return"ivec3";if(e===4)return"ivec4";if(e===5)return"ivec5";if(e===6)return"ivec6";throw Error(`GPU for rank ${e} is not yet supported`)}function cc(e,t){const n=JSON.parse(JSON.stringify(e));return n.shapeInfo.logicalShape=t,n}function lc(e,t){return t.map(n=>e[n]).join(", ")}class TK{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,k(e.length>2,()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`);const i=e[e.length-1],o=Math.ceil(i/t);this.outputShape=e.slice(0,-1),o>1&&this.outputShape.push(o),s||this.variableNames.push("bestIndicesA");const a=this.outputShape,c=a.length,h=Et(c),d=cs("coords",c);let m,y;if(o===1){y=c+1;const $=Et(y);m=` + `}function 
Rt(e){if(e<=1)return"int";if(e===2)return"ivec2";if(e===3)return"ivec3";if(e===4)return"ivec4";if(e===5)return"ivec5";if(e===6)return"ivec6";throw Error(`GPU for rank ${e} is not yet supported`)}function bc(e,t){const n=JSON.parse(JSON.stringify(e));return n.shapeInfo.logicalShape=t,n}function wc(e,t){return t.map(n=>e[n]).join(", ")}class z5{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,A(e.length>2,()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`);const i=e[e.length-1],o=Math.ceil(i/t);this.outputShape=e.slice(0,-1),o>1&&this.outputShape.push(o),s||this.variableNames.push("bestIndicesA");const a=this.outputShape,c=a.length,h=Rt(c),d=Mn("coords",c);let m,f;if(o===1){f=c+1;const $=Rt(f);m=` ${$} sourceLocR = ${$}(${d.join()}, 0); ++${d[c-1]}; ${$} sourceLocG = ${$}(${d.join()}, 0); @@ -723,7 +723,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, ${$} sourceLocA = ${$}(${d.join()}, 0); --${d[c-1]}; ${$} sourceLocB = ${$}(${d.join()}, 0); - --${d[c-2]};`}else y=c,m=` + --${d[c-2]};`}else f=c,m=` ${h} sourceLocR = coords; ++${d[c-1]}; ${h} sourceLocG = coords; @@ -731,15 +731,15 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, ${h} sourceLocA = coords; --${d[c-1]}; ${h} sourceLocB = coords; - --${d[c-2]};`;const b=["x","y","z","w","u","v"].slice(0,y),w="."+b[y-1],L=b.map($=>"int "+$),T=cs("sourceLocR",y-1).concat("inIdx.r"),A=cs("sourceLocG",y-1).concat("inIdx.g"),N=cs("sourceLocB",y-1).concat("inIdx.b"),E=cs("sourceLocA",y-1).concat("inIdx.a"),D=n==="max"?"greaterThan":"lessThan",F=s?"":` - inIdx = round(vec4(getBestIndicesAChannel(${T.join()}), - getBestIndicesAChannel(${A.join()}), + --${d[c-2]};`;const b=["x","y","z","w","u","v"].slice(0,f),w="."+b[f-1],L=b.map($=>"int "+$),x=Mn("sourceLocR",f-1).concat("inIdx.r"),v=Mn("sourceLocG",f-1).concat("inIdx.g"),N=Mn("sourceLocB",f-1).concat("inIdx.b"),O=Mn("sourceLocA",f-1).concat("inIdx.a"),E=n==="max"?"greaterThan":"lessThan",k=s?"":` + inIdx = round(vec4(getBestIndicesAChannel(${x.join()}), + getBestIndicesAChannel(${v.join()}), getBestIndicesAChannel(${N.join()}), - getBestIndicesAChannel(${E.join()})));`,_=`vec4( - getAChannel(${T.join()}), - hasNextCol ? getAChannel(${A.join()}) : 0., + getBestIndicesAChannel(${O.join()})));`,F=`vec4( + getAChannel(${x.join()}), + hasNextCol ? getAChannel(${v.join()}) : 0., hasNextRow ? getAChannel(${N.join()}) : 0., - hasNextRow && hasNextCol ? getAChannel(${E.join()}) : 0.)`,B=s?"":` + hasNextRow && hasNextCol ? getAChannel(${O.join()}) : 0.)`,U=s?"":` float getBestIndicesAChannel(${L.join()}) { return getChannel(getBestIndicesA(${b.join()}), vec2(${b.slice(-2).join()})); @@ -748,7 +748,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, return getChannel(getA(${b.join()}), vec2(${b.slice(-2).join()})); } - ${B} + ${U} void main() { ${h} coords = getOutputCoords(); bool hasNextCol = ${d[c-1]} < ${a[c-1]-1}; @@ -758,15 +758,15 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, sourceLocB${w}, sourceLocA${w}) * ${t}; ivec4 inIdx = srcIdx; vec4 bestIndex = vec4(inIdx); - vec4 bestValue = ${_}; + vec4 bestValue = ${F}; for (int i = 0; i < ${t}; i++) { inIdx = srcIdx; - ${F} - vec4 candidate = ${_}; + ${k} + vec4 candidate = ${F}; bvec4 nan = isnan(candidate); bvec4 replace = bvec4( - vec4(${D}(candidate, bestValue)) * (vec4(1.0) - vec4(nan))); + vec4(${E}(candidate, bestValue)) * (vec4(1.0) - vec4(nan))); bestValue = vec4(replace.x ? candidate.x : bestValue.x, replace.y ? 
candidate.y : bestValue.y, @@ -777,9 +777,9 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(bestIndex); } - `}}class AK{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterHeight,h=e.effectiveFilterWidth,d=c-1-e.padInfo.top,m=h-1-e.padInfo.left,y=1/(t*n);this.userCode=` + `}}class V5{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterHeight,h=e.effectiveFilterWidth,d=c-1-e.padInfo.top,m=h-1-e.padInfo.left,f=1/(t*n);this.userCode=` const ivec2 pads = ivec2(${d}, ${m}); - const float avgMultiplier = float(${y}); + const float avgMultiplier = float(${f}); void main() { ivec4 coords = getOutputCoords(); @@ -819,9 +819,9 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(dotProd); } - `}}class vK{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.effectiveFilterDepth,y=e.effectiveFilterHeight,b=e.effectiveFilterWidth,w=m-1-e.padInfo.front,L=y-1-e.padInfo.top,T=b-1-e.padInfo.left,A=1/(t*n*s);this.userCode=` - const ivec3 pads = ivec3(${w}, ${L}, ${T}); - const float avgMultiplier = float(${A}); + `}}class G5{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.effectiveFilterDepth,f=e.effectiveFilterHeight,b=e.effectiveFilterWidth,w=m-1-e.padInfo.front,L=f-1-e.padInfo.top,x=b-1-e.padInfo.left,v=1/(t*n*s);this.userCode=` + const ivec3 pads = ivec3(${w}, ${L}, ${x}); + const float avgMultiplier = float(${v}); void main() { ivec5 coords = getOutputCoords(); @@ -847,7 +847,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } int idyD = int(dyD); - for (int wR = 0; wR < ${y}; + for (int wR = 0; wR < ${f}; wR += ${h}) { float dyR = float(dyRCorner + wR) / ${o}.0; @@ -875,23 +875,10 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(dotProd); } - `}}const E0={REAL:"return areal * breal - aimag * bimag;",IMAG:"return areal * bimag + aimag * breal;"};class D0{constructor(e,t,n){this.variableNames=["AReal","AImag","BReal","BImag"],this.outputShape=tt(t,n),this.userCode=` - float binaryOpComplex( - float areal, float aimag, float breal, float bimag) { - ${e} - } - - void main() { - float areal = getARealAtOutCoords(); - float aimag = getAImagAtOutCoords(); - float breal = getBRealAtOutCoords(); - float bimag = getBImagAtOutCoords(); - setOutput(binaryOpComplex(areal, aimag, breal, bimag)); - } - `}}const k0=` + `}}const nC=` if (isnan(a)) return a; if (isnan(b)) return b; -`,QL="return a + b;",eS="return a - b;",F0="return a * b;",NK=` +`,Y5=` float s = sign(a) * sign(b); int ia = round(a); int ib = round(b); @@ -901,7 +888,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } else { return NAN; } -`,CK=` +`,H5=` if(a < 0.0 && floor(b) < b){ return NAN; } @@ -910,12 +897,12 @@ if (b == 0.0) { } return (round(mod(b, 2.0)) != 1) ? 
pow(abs(a), b) : sign(a) * pow(abs(a), b); -`,yee="return (a - b) * (a - b);",RK="return float(a == b);",OK="return float(a != b);",EK="return float(a < b);",DK="return float(a <= b);",kK="return float(a > b);",FK="return float(a >= b);",_K="return float(a >= 1.0 && b >= 1.0);",WK="return float(a >= 1.0 || b >= 1.0);",$K=k0+` +`,ine="return (a - b) * (a - b);",q5="return float(a == b);",j5="return float(a < b);",K5="return float(a <= b);",X5="return float(a > b);",J5="return float(a >= b);",Z5="return float(a >= 1.0 && b >= 1.0);",Q5="return float(a >= 1.0 || b >= 1.0);",e8=nC+` return max(a, b); -`,UK=k0+` +`,t8=nC+` return min(a, b); -`,BK=`if (b == 0.0) return NAN; - return mod(a, b);`,MK="return (b >= 1.0) ? a : a * (b + 1.0);",_0="return (a < 0.) ? b * a : a;";class hn{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=tt(t,n),this.userCode=` +`,n8=`if (b == 0.0) return NAN; + return mod(a, b);`,s8="return (b >= 1.0) ? a : a * (b + 1.0);",sC="return (a < 0.) ? b * a : a;";class _n{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=nt(t,n),this.userCode=` float binaryOperation(float a, float b) { ${e} } @@ -925,12 +912,12 @@ return (round(mod(b, 2.0)) != 1) ? float b = getBAtOutCoords(); setOutput(binaryOperation(a, b)); } - `}}const nm=` + `}}const km=` result.r = isNaN.r > 0. ? NAN : result.r; result.g = isNaN.g > 0. ? NAN : result.g; result.b = isNaN.b > 0. ? NAN : result.b; result.a = isNaN.a > 0. ? NAN : result.a; -`,PK=` +`,i8=` ivec4 ia = round(a); ivec4 ib = round(b); bvec4 cond = notEqual(ib, ivec4(0)); @@ -951,7 +938,7 @@ return (round(mod(b, 2.0)) != 1) ? result[3] = idiv(ia[3], ib[3], s[3]); } return vec4(result); -`,zK=` +`,r8=` // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise. vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1))); vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1); @@ -965,61 +952,61 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isExpZero.a ? 
1.0 : result.a; vec4 isNaN = vec4(lessThan(a, vec4(0.0))) * vec4(lessThan(floor(b), b)); - `+nm+` + `+km+` return result; -`,W0=` +`,iC=` vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); -`,GK=` +`,o8=` vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.))); return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); -`,VK=` +`,a8=` return vec4(equal(a, b)); -`,YK=` +`,rne=` return vec4(notEqual(a, b)); -`,HK=` +`,c8=` return vec4(lessThan(a, b)); -`,qK=` +`,l8=` return vec4(lessThanEqual(a, b)); -`,jK=` +`,h8=` return vec4(greaterThan(a, b)); -`,KK=` +`,u8=` return vec4(greaterThanEqual(a, b)); -`,XK=` +`,d8=` return vec4( vec4(greaterThanEqual(a, vec4(1.0))) * vec4(greaterThanEqual(b, vec4(1.0)))); -`,JK=` +`,p8=` return min( vec4(greaterThanEqual(a, vec4(1.0))) + vec4(greaterThanEqual(b, vec4(1.0))), vec4(1.0)); -`,ZK=` +`,m8=` vec4 result = vec4(max(a, b)); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+nm+` + `+km+` return result; -`,QK=` +`,f8=` vec4 result = vec4(min(a, b)); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+nm+` + `+km+` return result; -`,e5=` +`,g8=` vec4 result = mod(a, b); vec4 isNaN = vec4(equal(b, vec4(0.0))); - `+nm+` + `+km+` return result; -`;class qr{constructor(e,t,n,s=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=tt(t,n);const i=this.outputShape.length;let o="";if(s)if(i===0||we(this.outputShape)===1)o=` +`;class lr{constructor(e,t,n,s=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=nt(t,n);const i=this.outputShape.length;let o="";if(s)if(i===0||P(this.outputShape)===1)o=` result.y = 0.; result.z = 0.; result.w = 0.; - `;else{const a=Et(i);if(o=` + `;else{const a=Rt(i);if(o=` ${a} coords = getOutputCoords(); `,i===1)o+=` result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. : result.y; result.z = 0.; result.w = 0.; - `;else{const c=cs("coords",i);o+=` + `;else{const c=Mn("coords",i);o+=` bool nextRowOutOfBounds = (${c[i-2]} + 1) >= ${this.outputShape[i-2]}; bool nextColOutOfBounds = @@ -1041,7 +1028,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(result); } - `}}class t5{constructor(e){this.variableNames=["A"],this.outputShape=e,this.userCode=` + `}}class y8{constructor(e){this.variableNames=["A"],this.outputShape=e,this.userCode=` uniform float minVal; uniform float maxVal; @@ -1054,7 +1041,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(clamp(value, minVal, maxVal)); } - `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class n5{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class b8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` uniform float minVal; uniform float maxVal; @@ -1068,7 +1055,7 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(clamp(value, vec4(minVal), vec4(maxVal))); } - `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class s5{constructor(e){this.variableNames=["real","imag"],this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class w8{constructor(e){this.variableNames=["real","imag"],this.outputShape=e,this.userCode=` void main() { float re = abs(getRealAtOutCoords()); float im = abs(getImagAtOutCoords()); @@ -1081,53 +1068,7 @@ return (round(mod(b, 2.0)) != 1) ? mx == 0.0 ? 0.0 : mx * length(vec2(1, min(re, im)/mx)) ); } - `}}class i5{constructor(e){this.outputShape=[],this.outputShape=Or(e,1),this.variableNames=e.map((o,a)=>`T${a}`);const t=new Array(e.length-1);t[0]=e[0][1];for(let o=1;o`T${T}`);const c=new Array(e.length-1);c[0]=e[0][t];for(let L=1;L= ${c[L-1]}) { - return getChannel( - getT${L}(${sm(a,h,T)}), - vec2(${sm(d,h,T)})); - }`}const b=c.length,w=c[c.length-1];y+=` - return getChannel( - getT${b}(${sm(a,h,w)}), - vec2(${sm(d,h,w)}));`,this.userCode=` - float getValue(${a.map(L=>"int "+L)}) { - ${y} - } - - void main() { - ${i} coords = getOutputCoords(); - vec4 result = vec4(getValue(${o}), 0., 0., 0.); - - ${o[s-1]} = ${o[s-1]} + 1; - if (${o[s-1]} < ${n[s-1]}) { - result.g = getValue(${o}); - } - - ${o[s-2]} = ${o[s-2]} + 1; - if (${o[s-2]} < ${n[s-2]}) { - result.a = getValue(${o}); - } - - ${o[s-1]} = ${o[s-1]} - 1; - if (${o[s-2]} < ${n[s-2]} && - ${o[s-1]} < ${n[s-1]}) { - result.b = getValue(${o}); - } - setOutput(result); - } - `}}function sm(e,t,n){const s=e.indexOf(t),i=e.map((o,a)=>a===s?`${o} - ${n}`:o);return i.join()}class o5{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.dataFormat==="channelsLast";this.userCode=` + `}}class L8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.dataFormat==="channelsLast";this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; @@ -1169,7 +1110,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class a5{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dataFormat==="channelsLast",a=t-1-e.padInfo.top,c=n-1-e.padInfo.left,h=o?1:2,d=o?2:3,m=o?3:1;this.userCode=` + `}}class S8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dataFormat==="channelsLast",a=t-1-e.padInfo.top,c=n-1-e.padInfo.left,h=o?1:2,d=o?2:3,m=o?3:1;this.userCode=` const ivec2 pads = ivec2(${a}, ${c}); void main() { @@ -1222,7 +1163,7 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class c5{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.padInfo.front,o=e.padInfo.top,a=e.padInfo.left;this.userCode=` + `}}class I8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.padInfo.front,o=e.padInfo.top,a=e.padInfo.left;this.userCode=` void main() { ivec5 coords = getOutputCoords(); int wF = coords.x; @@ -1264,7 +1205,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class l5{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=t-1-e.padInfo.front,h=n-1-e.padInfo.top,d=s-1-e.padInfo.left;this.userCode=` + `}}class x8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=t-1-e.padInfo.front,h=n-1-e.padInfo.top,d=s-1-e.padInfo.left;this.userCode=` const ivec3 pads = ivec3(${c}, ${h}, ${d}); void main() { @@ -1321,7 +1262,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class h5{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.outChannels/e.inChannels;this.userCode=` + `}}class T8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.outChannels/e.inChannels;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; @@ -1356,7 +1297,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class u5{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=t-1-e.padInfo.top,a=n-1-e.padInfo.left,c=e.outChannels/e.inChannels;this.userCode=` + `}}class A8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=t-1-e.padInfo.top,a=n-1-e.padInfo.left,c=e.outChannels/e.inChannels;this.userCode=` const ivec2 pads = ivec2(${o}, ${a}); void main() { @@ -1401,15 +1342,15 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class $0{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.padInfo.top,o=e.padInfo.left,a=e.strideHeight,c=e.strideWidth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterHeight,y=e.filterWidth,b=Math.floor(e.inChannels/4)*4,w=e.inChannels%4,L=e.dataFormat==="channelsLast",T=L?1:2,A=L?2:3,N=L?3:1;let E="",D="";n&&(s?E=`float activation(float a) { + `}}class rC{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.padInfo.top,o=e.padInfo.left,a=e.strideHeight,c=e.strideWidth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterHeight,f=e.filterWidth,b=Math.floor(e.inChannels/4)*4,w=e.inChannels%4,L=e.dataFormat==="channelsLast",x=L?1:2,v=L?2:3,N=L?3:1;let O="",E="";n&&(s?O=`float activation(float a) { float b = getPreluActivationWeightsAtOutCoords(); ${n} - }`:E=` + }`:O=` float activation(float x) { ${n} } - `,D="result = activation(result);");const F=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` - ${E} + `,E="result = activation(result);");const k=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` + ${O} const ivec2 strides = ivec2(${a}, ${c}); const ivec2 pads = ivec2(${i}, ${o}); @@ -1420,7 +1361,7 @@ return (round(mod(b, 2.0)) != 1) ? int d2 = coords[${N}]; ivec2 xRCCorner = - ivec2(coords[${T}], coords[${A}]) * strides - pads; + ivec2(coords[${x}], coords[${v}]) * strides - pads; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; @@ -1434,7 +1375,7 @@ return (round(mod(b, 2.0)) != 1) ? continue; } - for (int wC = 0; wC < ${y}; wC++) { + for (int wC = 0; wC < ${f}; wC++) { int xC = xCCorner + wC * ${d}; if (xC < 0 || xC >= ${e.inWidth}) { @@ -1528,11 +1469,11 @@ return (round(mod(b, 2.0)) != 1) ? } float result = dotProd; - ${F} - ${D} + ${k} + ${E} setOutput(result); } - `}}class d5{constructor(e){this.variableNames=["x","W"],this.outputShape=e.outShape;const t=e.padInfo.front,n=e.padInfo.top,s=e.padInfo.left,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterDepth,y=e.filterHeight,b=e.filterWidth,w=Math.floor(e.inChannels/4)*4,L=e.inChannels%4;this.userCode=` + `}}class v8{constructor(e){this.variableNames=["x","W"],this.outputShape=e.outShape;const t=e.padInfo.front,n=e.padInfo.top,s=e.padInfo.left,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterDepth,f=e.filterHeight,b=e.filterWidth,w=Math.floor(e.inChannels/4)*4,L=e.inChannels%4;this.userCode=` const ivec3 strides = ivec3(${i}, ${o}, ${a}); const ivec3 pads = ivec3(${t}, ${n}, ${s}); @@ -1557,7 +1498,7 @@ return (round(mod(b, 2.0)) != 1) ? continue; } - for (int wR = 0; wR < ${y}; wR++) { + for (int wR = 0; wR < ${f}; wR++) { int xR = xRCorner + wR * ${h}; if (xR < 0 || xR >= ${e.inHeight}) { @@ -1620,15 +1561,15 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class U0{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.inHeight,o=e.inWidth,a=e.padInfo.top,c=e.padInfo.left,h=e.strideHeight,d=e.strideWidth,m=e.dilationHeight,y=e.dilationWidth,b=e.filterHeight,w=e.filterWidth,L=e.outChannels/e.inChannels;let T="",A="";n&&(s?T=`float activation(float a) { + `}}class oC{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.inHeight,o=e.inWidth,a=e.padInfo.top,c=e.padInfo.left,h=e.strideHeight,d=e.strideWidth,m=e.dilationHeight,f=e.dilationWidth,b=e.filterHeight,w=e.filterWidth,L=e.outChannels/e.inChannels;let x="",v="";n&&(s?x=`float activation(float a) { float b = getPreluActivationWeightsAtOutCoords(); ${n} - }`:T=` + }`:x=` float activation(float x) { ${n} } - `,A="result = activation(result);");const N=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` - ${T} + `,v="result = activation(result);");const N=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` + ${x} const ivec2 strides = ivec2(${h}, ${d}); const ivec2 pads = ivec2(${a}, ${c}); @@ -1656,7 +1597,7 @@ return (round(mod(b, 2.0)) != 1) ? } for (int wC = 0; wC < ${w}; wC++) { - int xC = xCCorner + wC * ${y}; + int xC = xCCorner + wC * ${f}; if (xC < 0 || xC >= ${o}) { continue; @@ -1670,27 +1611,27 @@ return (round(mod(b, 2.0)) != 1) ? float result = dotProd; ${N} - ${A} + ${v} setOutput(result); } - `}}class B0{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e.outShape;const i=e.inHeight,o=e.inWidth,a=e.padInfo.top,c=e.padInfo.left,h=e.strideHeight,d=e.strideWidth,m=e.dilationHeight,y=e.dilationWidth,b=e.filterHeight,w=e.filterWidth,L=w;let T="int xR; int xC; int xCOffset;";for(let D=0;D= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. if(xCOffset + 1 >= ${o}) { - xTexelR${D}C${_}.zw = vec2(0.); + xTexelR${E}C${F}.zw = vec2(0.); } } else { - xTexelR${D}C${_} = vec4(0.); + xTexelR${E}C${F} = vec4(0.); } xCOffset = xC + 1 - 2; @@ -1703,102 +1644,102 @@ return (round(mod(b, 2.0)) != 1) ? 
previous.zw = vec2(0.); } - xR${D}C${_} = vec4(previous.zw, xTexelR${D}C${_}.xy); + xR${E}C${F} = vec4(previous.zw, xTexelR${E}C${F}.xy); } else { - xR${D}C${_} = vec4(0, 0, xTexelR${D}C${_}.xy); + xR${E}C${F} = vec4(0, 0, xTexelR${E}C${F}.xy); } - `:T+=` + `:x+=` if(xR >= 0 && xR < ${i} && xC >= 0 && xC < ${o}) { - xTexelR${D}C${_} = getX(batch, xR, xC, d1); + xTexelR${E}C${F} = getX(batch, xR, xC, d1); } else { - xTexelR${D}C${_} = vec4(0.); + xTexelR${E}C${F} = vec4(0.); } - xR${D}C${_} = xTexelR${D}C${_}; - `,_+1= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_+2} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); } - `,y>1&&(T+=` + `,f>1&&(x+=` xCOffset -= 2; if(xR >= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${D}C${_} = vec4(0.); + xTexelR${E}C${F} = vec4(0.); } - `),T+=` - xR${D}C${_+1} = vec4( - xTexelR${D}C${_}.zw, xTexelR${D}C${_+2}.xy); - `):T+=` - xCOffset = xC + ${B}; + `),x+=` + xR${E}C${F+1} = vec4( + xTexelR${E}C${F}.zw, xTexelR${E}C${F+2}.xy); + `):x+=` + xCOffset = xC + ${U}; if(xR >= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_+2} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); } - xR${D}C${_+1} = xTexelR${D}C${_+2}; - `}}else _= 0 && xR < ${i}) { - `,c%2===1?(T+=` + `,c%2===1?(x+=` xCOffset = xC + 1 - ${d}; if(xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${D}C${_} = vec4(0.); + xTexelR${E}C${F} = vec4(0.); } if(xC + 1 >= 0 && xC + 1 < ${o}) { - xTexelR${D}C${_+2} = getX(batch, xR, xC + 1, d1); + xTexelR${E}C${F+2} = getX(batch, xR, xC + 1, d1); } else { - xTexelR${D}C${_+2} = vec4(0.); + xTexelR${E}C${F+2} = vec4(0.); } - xR${D}C${_} = vec4( - xTexelR${D}C${_}.zw, xTexelR${D}C${_+2}.zw); - `,_+1= 0 && xCOffset < ${o}) { final = getX(batch, xR, xCOffset, d1); } - xR${D}C${_+1} = vec4(xTexelR${D}C${_+2}.xy, final.xy); - `)):(T+=` + xR${E}C${F+1} = vec4(xTexelR${E}C${F+2}.xy, final.xy); + `)):(x+=` if(xC >= 0 && xC < ${o}) { - xTexelR${D}C${_} = getX(batch, xR, xC, d1); + xTexelR${E}C${F} = getX(batch, xR, xC, d1); } else { - xTexelR${D}C${_} = vec4(0.); + xTexelR${E}C${F} = vec4(0.); } xCOffset = xC + ${d}; if(xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${D}C${_+2} = getX(batch, xR, xCOffset, d1); + xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${D}C${_+2} = vec4(0.); + xTexelR${E}C${F+2} = vec4(0.); } - xR${D}C${_} = vec4( - xTexelR${D}C${_}.xy, xTexelR${D}C${_+2}.xy); - `,_+11?[`${(a-1)/(m-1)}`,"(y2-y1) * height_ratio",`y1*${w} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * ${w}`],[E,D,F]=y>1?[`${(c-1)/(y-1)}`,"(x2-x1) * width_ratio",`x1*${L} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * ${L}`];this.userCode=` - const float height_ratio = float(${T}); - const float width_ratio = float(${E}); + `}}class N8{constructor(e,t,n,s,i){this.variableNames=["Image","Boxes","BoxInd"],this.outputShape=[];const[o,a,c,h]=e,[d]=t,[m,f]=n;this.outputShape=[d,m,f,h];const b=s==="bilinear"?1:0,[w,L]=[`${a-1}.0`,`${c-1}.0`],[x,v,N]=m>1?[`${(a-1)/(m-1)}`,"(y2-y1) * height_ratio",`y1*${w} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * ${w}`],[O,E,k]=f>1?[`${(c-1)/(f-1)}`,"(x2-x1) * width_ratio",`x1*${L} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * 
${L}`];this.userCode=` + const float height_ratio = float(${x}); + const float width_ratio = float(${O}); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -1845,15 +1786,15 @@ return (round(mod(b, 2.0)) != 1) ? return; } - float height_scale = ${A}; - float width_scale = ${D}; + float height_scale = ${v}; + float width_scale = ${E}; float in_y = ${N}; if( in_y < 0.0 || in_y > ${w} ) { setOutput(float(${i})); return; } - float in_x = ${F}; + float in_x = ${k}; if( in_x < 0.0 || in_x > ${L} ) { setOutput(float(${i})); return; @@ -1884,23 +1825,23 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(newValue); } } - `}}class M0{constructor(e,t,n){this.variableNames=["x"],this.outputShape=e;const s=e.length,i=t?"0.0":`getX(${P0(s,"coords")})`,o=e[e.length-1];let a="",c="";t?(a=n?`end != ${o-1}`:"end != 0",c=n?"end + 1":"end - 1"):(a=n?`end + pow2 < ${o}`:"end >= pow2",c=n?"end + pow2":"end - pow2"),this.userCode=` + `}}class cC{constructor(e,t,n){this.variableNames=["x"],this.outputShape=e;const s=e.length,i=t?"0.0":`getX(${lC(s,"coords")})`,o=e[e.length-1];let a="",c="";t?(a=n?`end != ${o-1}`:"end != 0",c=n?"end + 1":"end - 1"):(a=n?`end + pow2 < ${o}`:"end >= pow2",c=n?"end + pow2":"end - pow2"),this.userCode=` uniform float index; void main() { - ${Et(s)} coords = getOutputCoords(); - int end = ${z0(s,"coords")}; + ${Rt(s)} coords = getOutputCoords(); + int end = ${hC(s,"coords")}; float val = ${i}; int pow2 = int(pow(2.0, index)); if (${a}) { int idx = ${c}; - ${z0(s,"coords")} = idx; - val += getX(${P0(s,"coords")}); + ${hC(s,"coords")} = idx; + val += getX(${lC(s,"coords")}); } setOutput(val); } - `}getCustomSetupFunc(e){return(t,n)=>{this.index==null&&(this.index=t.getUniformLocation(n,"index")),t.gl.uniform1f(this.index,e)}}}function P0(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.x, ${t}.y`;if(e===3)return`${t}.x, ${t}.y, ${t}.z`;if(e===4)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}function z0(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.y`;if(e===3)return`${t}.z`;if(e===4)return`${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}class m5{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=qh.DENSE;const t=Kh(e),n=Wn();this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(e){return(t,n)=>{this.index==null&&(this.index=t.getUniformLocation(n,"index")),t.gl.uniform1f(this.index,e)}}}function lC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.x, ${t}.y`;if(e===3)return`${t}.x, ${t}.y, ${t}.z`;if(e===4)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}function hC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.y`;if(e===3)return`${t}.z`;if(e===4)return`${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}class C8{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=lu.DENSE;const t=uu(e),n=Pn();this.outputShape=e,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${Bo(["r","c","d"],e)} + ${Yo(["r","c","d"],e)} return ivec3(r, c, d); } @@ -1919,9 +1860,9 @@ return (round(mod(b, 2.0)) != 1) ? 
${n.output} = result; } - `}}class f5{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=qh.DENSE;const t=Kh(e),n=Wn();this.outputShape=e,this.userCode=` + `}}class R8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=lu.DENSE;const t=uu(e),n=Pn();this.outputShape=e,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${Bo(["r","c","d"],e)} + ${Yo(["r","c","d"],e)} return ivec3(r, c, d); } @@ -1940,7 +1881,7 @@ return (round(mod(b, 2.0)) != 1) ? ${n.output} = result; } - `}}class g5{constructor(e,t,n){this.variableNames=["x"],this.outputShape=[],this.outputShape=e,this.blockSize=t,this.dataFormat=n,this.userCode=` + `}}class O8{constructor(e,t,n){this.variableNames=["x"],this.outputShape=[],this.outputShape=e,this.blockSize=t,this.dataFormat=n,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -1959,29 +1900,29 @@ return (round(mod(b, 2.0)) != 1) ? float result = ${this.getInputSamplingString()}; setOutput(result); } - `}getHeightCoordString(){return this.dataFormat==="NHWC"?"coords[1]":"coords[2]"}getWidthCoordString(){return this.dataFormat==="NHWC"?"coords[2]":"coords[3]"}getDepthCoordString(){return this.dataFormat==="NHWC"?"coords[3]":"coords[1]"}getOutputDepthSize(){return this.dataFormat==="NHWC"?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return this.dataFormat==="NHWC"?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}}class y5{constructor(e){this.variableNames=["X"],this.outputShape=[e,e],this.userCode=` + `}getHeightCoordString(){return this.dataFormat==="NHWC"?"coords[1]":"coords[2]"}getWidthCoordString(){return this.dataFormat==="NHWC"?"coords[2]":"coords[3]"}getDepthCoordString(){return this.dataFormat==="NHWC"?"coords[3]":"coords[1]"}getOutputDepthSize(){return this.dataFormat==="NHWC"?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return this.dataFormat==="NHWC"?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}}class E8{constructor(e){this.variableNames=["X"],this.outputShape=[e,e],this.userCode=` void main() { ivec2 coords = getOutputCoords(); float val = coords[0] == coords[1] ? getX(coords[0]) : 0.0; setOutput(val); } - `}}class b5{constructor(e){this.variableNames=["A"],this.outTexUsage=As.DOWNLOAD;const t=Wn();this.outputShape=e,this.userCode=` - ${N0} + `}}class D8{constructor(e){this.variableNames=["A"],this.outTexUsage=Ns.DOWNLOAD;const t=Pn();this.outputShape=e,this.userCode=` + ${Z0} void main() { float x = getAAtOutCoords(); ${t.output} = encode_float(x); } - `}}class w5{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=As.DOWNLOAD;const t=Wn();this.outputShape=e,this.userCode=` - ${N0} + `}}class k8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=Ns.DOWNLOAD;const t=Pn();this.outputShape=e,this.userCode=` + ${Z0} void main() { ivec3 coords = getOutputCoords(); float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z)); ${t.output} = encode_float(x); } - `}}class L5{constructor(e,t,n=!1){this.variableNames=["A"];const s=Wn(),[i,o]=t;this.outputShape=e;let a="result";n&&(a="floor(result * 255. + 0.5)"),this.userCode=` - ${ZL(e)} + `}}class F8{constructor(e,t,n=!1){this.variableNames=["A"];const s=Pn(),[i,o]=t;this.outputShape=e;let a="result";n&&(a="floor(result * 255. 
+ 0.5)"),this.userCode=` + ${IS(e)} void main() { ivec3 coords = getOutputCoords(); @@ -2010,7 +1951,7 @@ return (round(mod(b, 2.0)) != 1) ? ${s.output} = vec4(${a}, 0., 0., 0.); } - `}}class S5{constructor(e,t,n=!1){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const s=Wn(),[i,o]=t;this.outputShape=e;let a="",c="result";n&&(c="floor(result * 255. + 0.5)");for(let h=0;h<=1;h++)for(let d=0;d<=1;d++){const m=h*2+d;a+=` + `}}class _8{constructor(e,t,n=!1){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const s=Pn(),[i,o]=t;this.outputShape=e;let a="",c="result";n&&(c="floor(result * 255. + 0.5)");for(let h=0;h<=1;h++)for(let d=0;d<=1;d++){const m=h*2+d;a+=` localCoords = coords; if(localCoords[2] + ${d} < ${e[2]}) { localCoords[2] += ${d}; @@ -2039,7 +1980,7 @@ return (round(mod(b, 2.0)) != 1) ? } } `}this.userCode=` - ${ZL(e)} + ${IS(e)} void main() { ivec3 coords = getOutputCoords(); @@ -2054,51 +1995,18 @@ return (round(mod(b, 2.0)) != 1) ? ${s.output} = ${c}; } - `}}const G0={REAL:"return real * expR - imag * expI;",IMAG:"return real * expI + imag * expR;"};class V0{constructor(e,t,n){this.variableNames=["real","imag"];const s=t[1];this.outputShape=t;const i=n?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,o=n?`${s}.0`:"1.0";this.userCode=` - const float exponentMultiplier = ${i}; - - float unaryOpComplex(float real, float expR, float imag, float expI) { - ${e} - } - - float mulMatDFT(int batch, int index) { - float indexRatio = float(index) / float(${s}); - float exponentMultiplierTimesIndexRatio = - exponentMultiplier * indexRatio; - - float result = 0.0; - - for (int i = 0; i < ${s}; i++) { - // x = (-2|2 * PI / N) * index * i; - float x = exponentMultiplierTimesIndexRatio * float(i); - float expR = cos(x); - float expI = sin(x); - float real = getReal(batch, i); - float imag = getImag(batch, i); - - result += - unaryOpComplex(real, expR, imag, expI) / ${o}; - } - - return result; - } - - void main() { - ivec2 coords = getOutputCoords(); - setOutput(mulMatDFT(coords[0], coords[1])); - } - `}}class I5{constructor(e,t){this.outputShape=[],this.variableNames=["x"],this.outputShape=e,this.userCode=` + `}}class W8{constructor(e,t){this.outputShape=[],this.variableNames=["x"],this.outputShape=e,this.userCode=` uniform float value; void main() { // Input can be obtained from uniform value. 
setOutput(value); } - `}getCustomSetupFunc(e){return(t,n)=>{this.valueLoc==null&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}class x5{constructor(e,t,n){this.variableNames=["A","indices"];const s=e.slice();s[n]=t,this.outputShape=s,this.rank=s.length;const i=Et(this.rank),o=T5(e,n);this.userCode=` + `}getCustomSetupFunc(e){return(t,n)=>{this.valueLoc==null&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}class $8{constructor(e,t,n){this.variableNames=["A","indices"];const s=e.slice();s[n]=t,this.outputShape=s,this.rank=s.length;const i=Rt(this.rank),o=U8(e,n);this.userCode=` void main() { ${i} resRC = getOutputCoords(); setOutput(getA(${o})); } - `}}function T5(e,t){const n=e.length;if(n>4)throw Error(`Gather for rank ${n} is not yet supported`);if(n===1)return"int(getIndices(resRC))";const s=["resRC.x","resRC.y","resRC.z","resRC.w"],i=[];for(let o=0;o1?"strides[j]":"strides";this.userCode=` + `}}function U8(e,t){const n=e.length;if(n>4)throw Error(`Gather for rank ${n} is not yet supported`);if(n===1)return"int(getIndices(resRC))";const s=["resRC.x","resRC.y","resRC.z","resRC.w"],i=[];for(let o=0;o1?"strides[j]":"strides";this.userCode=` ${s} strides = ${s}(${this.strides}); void main() { ${i} coords = getOutputCoords(); @@ -2109,7 +2017,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(getX(flattenIndex, coords[1])); } - `}}function v5(e){const t=Wn(),n=`${t.version} + `}}function M8(e){const t=Pn(),n=`${t.version} precision highp float; ${t.attribute} vec3 clipSpacePos; ${t.attribute} vec2 uv; @@ -2118,9 +2026,9 @@ return (round(mod(b, 2.0)) != 1) ? void main() { gl_Position = vec4(clipSpacePos, 1); resultUV = uv; - }`;return tj(e,n)}function N5(e){const t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return aj(e,t)}function C5(e){const t=new Uint16Array([0,1,2,2,1,3]);return cj(e,t)}function Jh(e,t,n,s,i,o){hj(t,n);const a=lj(e),c=e.TEXTURE_2D;return Re(e,()=>e.bindTexture(c,a)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MIN_FILTER,e.NEAREST)),Re(e,()=>e.texParameteri(c,e.TEXTURE_MAG_FILTER,e.NEAREST)),Re(e,()=>e.texImage2D(c,0,s,t,n,0,i,o,null)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null)),a}function Y0(e){return e.internalFormatFloat}function R5(e,t,n,s){const[i,o]=jh(t,n);return Jh(e,i,o,Y0(s),s.textureFormatFloat,e.FLOAT)}function H0(e){return e.internalFormatHalfFloat}function O5(e,t,n,s){const[i,o]=jh(t,n);return Jh(e,i,o,H0(s),s.textureFormatFloat,s.textureTypeHalfFloat)}function q0(e){return e.downloadTextureFormat}function E5(e,t,n,s){const[i,o]=jh(t,n);return Jh(e,i,o,q0(s),e.RGBA,e.UNSIGNED_BYTE)}function j0(e){return e.internalFormatPackedFloat}function D5(e,t,n,s){const[i,o]=sc(t,n);return Jh(e,i,o,j0(s),e.RGBA,e.FLOAT)}function K0(e){return e.internalFormatPackedHalfFloat}function k5(e,t,n,s){const[i,o]=sc(t,n);return Jh(e,i,o,K0(s),e.RGBA,s.textureTypeHalfFloat)}function F5(e,t,n){const s=0,i=3*4,o=3*4+2*4;Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n));const a=S0(e,t,"clipSpacePos",n,3,o,s);return a&&S0(e,t,"uv",n,2,o,i)}function _5(e,t,n,s,i,o){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t));let a,c,h;i instanceof Uint8Array?(a=new Uint8Array(n*s*4),c=e.UNSIGNED_BYTE,h=e.RGBA):(a=new Float32Array(n*s*4),c=e.FLOAT,h=o.internalFormatPackedFloat),a.set(i),Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,h,n,s,0,e.RGBA,c,a)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function 
W5(e,t,n){Re(e,()=>e.bindTexture(e.TEXTURE_2D,t)),n.data instanceof Uint8Array?Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data)):Re(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n)),Re(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function $5(e,t,n,s){const i=e.createBuffer();Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,i));const o=4,a=4,c=o*a*t*n;return Re(e,()=>e.bufferData(e.PIXEL_PACK_BUFFER,c,e.STREAM_READ)),Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0)),Re(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null)),i}function U5(e,t,n){const s=e,i=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,i),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),i}function B5(e,t,n,s){const[i,o]=jh(t,n),a=4,c=new Uint8Array(j4(t*n,a));return Re(e,()=>e.readPixels(0,0,i,o,s.downloadTextureFormat,e.UNSIGNED_BYTE,c)),new Float32Array(c.buffer)}function M5(e,t,n,s,i,o,a,c){const h=e,d=new Float32Array(K4(o,a));return h.bindBuffer(h.PIXEL_PACK_BUFFER,t),h.getBufferSubData(h.PIXEL_PACK_BUFFER,0,d),h.bindBuffer(h.PIXEL_PACK_BUFFER,null),d}function P5(e,t,n){const s=new Float32Array(t*n*4);return Re(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s)),s}class z5{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const t=C().getNumber("WEBGL_VERSION");e!=null?(this.gl=e,Y4(t,e)):this.gl=_i(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(C().getNumber("WEBGL_VERSION")===1){const i="OES_texture_float",o="OES_texture_half_float";if(this.textureFloatExtension=Kp(this.gl,i),Ps(this.gl,o))this.textureHalfFloatExtension=Kp(this.gl,o);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),Ps(this.gl,s))this.colorBufferHalfFloatExtension=Kp(this.gl,s);else if(C().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",Ps(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else if(Ps(this.gl,s))this.colorBufferHalfFloatExtension=this.gl.getExtension(s);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=N5(this.gl),this.indexBuffer=C5(this.gl),this.framebuffer=uj(this.gl),this.textureConfig=qL(this.gl,this.textureHalfFloatExtension)}get debug(){return C().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. 
This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;Re(e,()=>e.finish()),Re(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Re(e,()=>e.deleteFramebuffer(this.framebuffer)),Re(e,()=>e.bindBuffer(e.ARRAY_BUFFER,null)),Re(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null)),Re(e,()=>e.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),R5(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),O5(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),E5(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),W5(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),_5(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),k5(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),D5(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(I0(this.gl,this.framebuffer),this.outputTexture=null),Re(this.gl,()=>this.gl.deleteTexture(e))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,()=>B5(this.gl,t,n,this.textureConfig))}downloadPackedMatrixFromBuffer(e,t,n,s,i,o){return M5(this.gl,e,t,n,s,i,o,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return U5(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=$5(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(C().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,i=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const o=s.clientWaitSync(i,0,0);return o===s.ALREADY_SIGNALED||o===s.CONDITION_SATISFIED},t=i}else C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,()=>P5(this.gl,t,n))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=nj(t,e),s=v5(t),i=rj(t);return Re(t,()=>t.attachShader(i,s)),Re(t,()=>t.attachShader(i,n)),oj(t,i),this.debug&&jL(t,i),this.vertexAttrsAreBound||(this.setProgram(i),this.vertexAttrsAreBound=F5(t,this.program,this.vertexBuffer)),i}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),e!=null&&Re(this.gl,()=>this.gl.deleteProgram(e))}setProgram(e){this.throwIfDisposed(),this.program=e,this.program!=null&&this.debug&&jL(this.gl,this.program),Re(this.gl,()=>this.gl.useProgram(e))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?pj(this.gl,e,t):mj(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),Re(this.gl,()=>this.gl.getAttribLocation(e,t))}getUniformLocationNoThrow(e,t){return 
this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),fj(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,i]=sc(t,n);this.setOutputMatrixTextureDriver(e,s,i)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&jL(this.gl,this.program),Xp(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),Re(e,()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Re(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=Kp(this.gl,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.createQuery();return n.beginQuery(s.TIME_ELAPSED_EXT,i),i}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const t=this.gl,n=this.getQueryTimerExtensionWebGL2();t.endQuery(n.TIME_ELAPSED_EXT);return}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await sy(()=>this.disposed||this.isQueryAvailable(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(e,C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(t===0)return null;if(t===2){const n=this.gl,s=n.getQueryParameter(e,n.QUERY_RESULT);return s/1e6}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_EXT);return s/1e6}}isQueryAvailable(e,t){if(t===0)return!0;if(t===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.getQueryParameter(e,n.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(s.GPU_DISJOINT_EXT)),i&&!this.disjoint}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}}pollFence(e){return new Promise(t=>{this.addItemToPoll(()=>e.isFencePassed(),()=>t())})}pollItems(){const e=G5(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=e;++t){const{resolveFn:n}=this.itemsToPoll[t];n()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){if(this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1)return;sy(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),KL(this.gl,e,this.framebuffer),this.debug&&Xp(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(KL(this.gl,this.outputTexture,this.framebuffer),this.debug&&Xp(this.gl)):I0(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return 
this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;KL(s,e,this.framebuffer),this.debug&&Xp(s),this.outputTexture=e,Re(s,()=>s.viewport(0,0,t,n)),Re(s,()=>s.scissor(0,0,t,n))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),Re(this.gl,()=>this.gl.scissor(e,t,n,s))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function G5(e){let t=0;for(;t{const T={logicalShape:w.shape,texShape:w.isUniform?null:w.texData.texShape,isUniform:w.isUniform,isPacked:w.isUniform?!1:w.texData.isPacked,flatOffset:null};return w.texData!=null&&w.texData.slice!=null&&w.texData.slice.flatOffset>0&&(T.flatOffset=w.texData.slice.flatOffset),{name:t.variableNames[L],shapeInfo:T}}),a=o.map(w=>w.shapeInfo),c={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},h=zj(o,c,i,t.packedInputs),d=e.createProgram(h);let m=null;const y=e.getUniformLocation(d,"NAN",!1);C().getNumber("WEBGL_VERSION")===1&&(m=e.getUniformLocation(d,"INFINITY",!1));const b={};for(let w=0;w{const i=n.logicalShape,o=t[s],a=o.shape;if(!ot(i,a))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${i} and ${a} must match`);if(n.isUniform&&o.isUniform)return;const c=n.texShape,h=o.isUniform?null:o.texData.texShape;if(!ot(c,h))throw Error(`Binary was compiled with different texture shapes than the current args. Shape ${c} and ${h} must match`)})}function Y5(e,t,n,s,i){X0(t.inShapeInfos,n),X0([t.outShapeInfo],[s]);const o=s.texData.texture,a=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(o,a[0],a[1]):e.setOutputMatrixTexture(o,a[0],a[1]),e.setProgram(t.webGLProgram),C().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&e.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach((c,h)=>{const d=t.program.variableNames[h],m=t.uniformLocations[d],y=t.uniformLocations[`offset${d}`];if(m==null)return;if(c.isUniform){if(we(c.shape)<2)e.gl.uniform1f(m,c.uniformValues[0]);else{let b=c.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),e.gl.uniform1fv(m,b)}return}c.texData.slice!=null&&y!=null&&e.gl.uniform1i(y,c.texData.slice.flatOffset),e.setInputMatrixTexture(c.texData.texture,m,h)}),i!=null&&i(e,t.webGLProgram),e.executeProgram()}function H5(e,t,n){let s="";t.concat(n).forEach(a=>{const c=a.texData!=null&&a.texData.slice!=null&&a.texData.slice.flatOffset>0,h=a.isUniform?"uniform":a.texData.texShape;s+=`${a.shape}_${h}_${c}`});const i=e.userCode;let o=e.constructor.name;return o+="_"+s+"_"+i,o}class q5{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:i,strideWidth:o,strideHeight:a,padInfo:c,outWidth:h,dilationWidth:d,dilationHeight:m,dataFormat:y}=n,{left:b,top:w}=c,L=i*s,T=Wn(),A=y==="channelsLast",N=A?0:1,E=A?1:2;let D="";for(let F=0;F<=1;F++)for(let _=0;_<=1;_++)D+=` - blockIndex = rc.y + ${_}; - pos = rc.x + ${F}; + }`;return wK(e,n)}function P8(e){const t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return AK(e,t)}function z8(e){const t=new Uint16Array([0,1,2,2,1,3]);return vK(e,t)}function pu(e,t,n,s,i,o){CK(t,n);const a=NK(e),c=e.TEXTURE_2D;return 
Ee(e,()=>e.bindTexture(c,a)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_MIN_FILTER,e.NEAREST)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_MAG_FILTER,e.NEAREST)),Ee(e,()=>e.texImage2D(c,0,s,t,n,0,i,o,null)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null)),a}function uC(e){return e.internalFormatFloat}function V8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,uC(s),s.textureFormatFloat,e.FLOAT)}function dC(e){return e.internalFormatHalfFloat}function G8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,dC(s),s.textureFormatFloat,s.textureTypeHalfFloat)}function pC(e){return e.downloadTextureFormat}function Y8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,pC(s),e.RGBA,e.UNSIGNED_BYTE)}function mC(e){return e.internalFormatPackedFloat}function H8(e,t,n,s){const[i,o]=pc(t,n);return pu(e,i,o,mC(s),e.RGBA,e.FLOAT)}function fC(e){return e.internalFormatPackedHalfFloat}function q8(e,t,n,s){const[i,o]=pc(t,n);return pu(e,i,o,fC(s),e.RGBA,s.textureTypeHalfFloat)}function j8(e,t,n){const s=0,i=3*4,o=3*4+2*4;Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n));const a=q0(e,t,"clipSpacePos",n,3,o,s);return a&&q0(e,t,"uv",n,2,o,i)}function K8(e,t,n,s,i,o){Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t));let a,c,h;i instanceof Uint8Array?(a=new Uint8Array(n*s*4),c=e.UNSIGNED_BYTE,h=e.RGBA):(a=new Float32Array(n*s*4),c=e.FLOAT,h=o.internalFormatPackedFloat),a.set(i),Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,h,n,s,0,e.RGBA,c,a)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function X8(e,t,n){Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t)),n.data instanceof Uint8Array?Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data)):Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function J8(e,t,n,s){const i=e.createBuffer();Ee(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,i));const o=4,a=4,c=o*a*t*n;return Ee(e,()=>e.bufferData(e.PIXEL_PACK_BUFFER,c,e.STREAM_READ)),Ee(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0)),Ee(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null)),i}function Z8(e,t,n){const s=e,i=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,i),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),i}function Q8(e,t,n,s){const[i,o]=hu(t,n),a=4,c=new Uint8Array(dK(t*n,a));return Ee(e,()=>e.readPixels(0,0,i,o,s.downloadTextureFormat,e.UNSIGNED_BYTE,c)),new Float32Array(c.buffer)}function e6(e,t,n,s,i,o,a,c){const h=e,d=new Float32Array(pK(o,a));return h.bindBuffer(h.PIXEL_PACK_BUFFER,t),h.getBufferSubData(h.PIXEL_PACK_BUFFER,0,d),h.bindBuffer(h.PIXEL_PACK_BUFFER,null),d}function t6(e,t,n){const s=new Float32Array(t*n*4);return Ee(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s)),s}class n6{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const t=oe().getNumber("WEBGL_VERSION");e!=null?(this.gl=e,lK(t,e)):this.gl=ki(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(oe().getNumber("WEBGL_VERSION")===1){const i="OES_texture_float",o="OES_texture_half_float";if(this.textureFloatExtension=vm(this.gl,i),Vs(this.gl,o))this.textureHalfFloatExtension=vm(this.gl,o);else if(oe().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to 
true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),Vs(this.gl,s))this.colorBufferHalfFloatExtension=vm(this.gl,s);else if(oe().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",Vs(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else if(Vs(this.gl,s))this.colorBufferHalfFloatExtension=this.gl.getExtension(s);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=P8(this.gl),this.indexBuffer=z8(this.gl),this.framebuffer=RK(this.gl),this.textureConfig=gS(this.gl,this.textureHalfFloatExtension)}get debug(){return oe().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;Ee(e,()=>e.finish()),Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Ee(e,()=>e.deleteFramebuffer(this.framebuffer)),Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,null)),Ee(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null)),Ee(e,()=>e.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),V8(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),G8(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),Y8(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),X8(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),K8(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),q8(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),H8(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(j0(this.gl,this.framebuffer),this.outputTexture=null),Ee(this.gl,()=>this.gl.deleteTexture(e))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,()=>Q8(this.gl,t,n,this.textureConfig))}downloadPackedMatrixFromBuffer(e,t,n,s,i,o){return e6(this.gl,e,t,n,s,i,o,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return Z8(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=J8(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(oe().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,i=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const o=s.clientWaitSync(i,0,0);return o===s.ALREADY_SIGNALED||o===s.CONDITION_SATISFIED},t=i}else oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,()=>t6(this.gl,t,n))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=LK(t,e),s=M8(t),i=xK(t);return 
Ee(t,()=>t.attachShader(i,s)),Ee(t,()=>t.attachShader(i,n)),TK(t,i),this.debug&&yS(t,i),this.vertexAttrsAreBound||(this.setProgram(i),this.vertexAttrsAreBound=j8(t,this.program,this.vertexBuffer)),i}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),e!=null&&Ee(this.gl,()=>this.gl.deleteProgram(e))}setProgram(e){this.throwIfDisposed(),this.program=e,this.program!=null&&this.debug&&yS(this.gl,this.program),Ee(this.gl,()=>this.gl.useProgram(e))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?EK(this.gl,e,t):DK(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),Ee(this.gl,()=>this.gl.getAttribLocation(e,t))}getUniformLocationNoThrow(e,t){return this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),kK(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,i]=pc(t,n);this.setOutputMatrixTextureDriver(e,s,i)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&yS(this.gl,this.program),Nm(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),Ee(e,()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Ee(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=vm(this.gl,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.createQuery();return n.beginQuery(s.TIME_ELAPSED_EXT,i),i}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const t=this.gl,n=this.getQueryTimerExtensionWebGL2();t.endQuery(n.TIME_ELAPSED_EXT);return}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await $t(()=>this.disposed||this.isQueryAvailable(e,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(e,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(t===0)return null;if(t===2){const n=this.gl,s=n.getQueryParameter(e,n.QUERY_RESULT);return s/1e6}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_EXT);return s/1e6}}isQueryAvailable(e,t){if(t===0)return!0;if(t===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.getQueryParameter(e,n.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(s.GPU_DISJOINT_EXT)),i&&!this.disjoint}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}}pollFence(e){return new 
Promise(t=>{this.addItemToPoll(()=>e.isFencePassed(),()=>t())})}pollItems(){const e=s6(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=e;++t){const{resolveFn:n}=this.itemsToPoll[t];n()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){if(this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1)return;$t(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),bS(this.gl,e,this.framebuffer),this.debug&&Nm(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(bS(this.gl,this.outputTexture,this.framebuffer),this.debug&&Nm(this.gl)):j0(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;bS(s,e,this.framebuffer),this.debug&&Nm(s),this.outputTexture=e,Ee(s,()=>s.viewport(0,0,t,n)),Ee(s,()=>s.scissor(0,0,t,n))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),Ee(this.gl,()=>this.gl.scissor(e,t,n,s))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function s6(e){let t=0;for(;t{const x={logicalShape:w.shape,texShape:w.isUniform?null:w.texData.texShape,isUniform:w.isUniform,isPacked:w.isUniform?!1:w.texData.isPacked,flatOffset:null};return w.texData!=null&&w.texData.slice!=null&&w.texData.slice.flatOffset>0&&(x.flatOffset=w.texData.slice.flatOffset),{name:t.variableNames[L],shapeInfo:x}}),a=o.map(w=>w.shapeInfo),c={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},h=o5(o,c,i,t.packedInputs),d=e.createProgram(h);let m=null;const f=e.getUniformLocation(d,"NAN",!1);oe().getNumber("WEBGL_VERSION")===1&&(m=e.getUniformLocation(d,"INFINITY",!1));const b={};for(let w=0;w{const i=n.logicalShape,o=t[s],a=o.shape;if(!ae(i,a))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${i} and ${a} must match`);if(n.isUniform&&o.isUniform)return;const c=n.texShape,h=o.isUniform?null:o.texData.texShape;if(!ae(c,h))throw Error(`Binary was compiled with different texture shapes than the current args. 
Shape ${c} and ${h} must match`)})}function r6(e,t,n,s,i){gC(t.inShapeInfos,n),gC([t.outShapeInfo],[s]);const o=s.texData.texture,a=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(o,a[0],a[1]):e.setOutputMatrixTexture(o,a[0],a[1]),e.setProgram(t.webGLProgram),oe().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&e.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach((c,h)=>{const d=t.program.variableNames[h],m=t.uniformLocations[d],f=t.uniformLocations[`offset${d}`];if(m==null)return;if(c.isUniform){if(P(c.shape)<2)e.gl.uniform1f(m,c.uniformValues[0]);else{let b=c.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),e.gl.uniform1fv(m,b)}return}c.texData.slice!=null&&f!=null&&e.gl.uniform1i(f,c.texData.slice.flatOffset),e.setInputMatrixTexture(c.texData.texture,m,h)}),i!=null&&i(e,t.webGLProgram),e.executeProgram()}function o6(e,t,n){let s="";t.concat(n).forEach(a=>{const c=a.texData!=null&&a.texData.slice!=null&&a.texData.slice.flatOffset>0,h=a.isUniform?"uniform":a.texData.texShape;s+=`${a.shape}_${h}_${c}`});const i=e.userCode;let o=e.constructor.name;return o+="_"+s+"_"+i,o}class a6{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:i,strideWidth:o,strideHeight:a,padInfo:c,outWidth:h,dilationWidth:d,dilationHeight:m,dataFormat:f}=n,{left:b,top:w}=c,L=i*s,x=Pn(),v=f==="channelsLast",N=v?0:1,O=v?1:2;let E="";for(let k=0;k<=1;k++)for(let F=0;F<=1;F++)E+=` + blockIndex = rc.y + ${F}; + pos = rc.x + ${k}; if(blockIndex < ${e[1]} && pos < ${e[0]}) { offsetY = int(blockIndex / (${h})) * ${a} - ${w}; @@ -2131,18 +2039,18 @@ return (round(mod(b, 2.0)) != 1) ? offsetX = int(mod(float(blockIndex), ${h}.) * ${o}. - ${b}.); d1 = offsetX + ${d} * (int(mod(float(pos), ${L}.) / ${i}.)); - if(d1 < ${t[E]} && d1 >= 0) { + if(d1 < ${t[O]} && d1 >= 0) { ch = int(mod(float(pos), ${i}.)); - if (${A}) { + if (${v}) { innerDims = vec2(d1, ch); - result[${F*2+_}] = getChannel( + result[${k*2+F}] = getChannel( getA(d0, int(innerDims.x), int(innerDims.y)), innerDims); } else { innerDims = vec2(d0, d1); - result[${F*2+_}] = getChannel( + result[${k*2+F}] = getChannel( getA(ch, int(innerDims.x), int(innerDims.y)), innerDims); } @@ -2158,11 +2066,11 @@ return (round(mod(b, 2.0)) != 1) ? int blockIndex, pos, offsetY, d0, offsetX, d1, ch; vec2 innerDims; - ${D} + ${E} - ${T.output} = result; + ${x.output} = result; } - `}}class j5{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[];const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` + `}}class c6{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[];const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2181,7 +2089,7 @@ return (round(mod(b, 2.0)) != 1) ? 
float val = x * ${c}; setOutput(val); } - `}}class K5{constructor(e,t,n,s,i){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=e,this.depth=e[3],this.depthRadius=t,this.bias=n,this.alpha=s,this.beta=i,this.userCode=` + `}}class l6{constructor(e,t,n,s,i){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=e,this.depth=e[3],this.depthRadius=t,this.bias=n,this.alpha=s,this.beta=i,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2236,7 +2144,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(result); } - `}}class X5{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` + `}}class h6{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords.x; @@ -2298,7 +2206,7 @@ return (round(mod(b, 2.0)) != 1) ? vec4 result = xAtOutputCoords * ${c}; setOutput(result); } - `}}class J5{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideHeight,n=e.strideWidth,s=e.dilationHeight,i=e.effectiveFilterHeight,o=e.effectiveFilterWidth,a=i-1-e.padInfo.top,c=o-1-e.padInfo.left,h=i*o-1;this.userCode=` + `}}class u6{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideHeight,n=e.strideWidth,s=e.dilationHeight,i=e.effectiveFilterHeight,o=e.effectiveFilterWidth,a=i-1-e.padInfo.top,c=o-1-e.padInfo.left,h=i*o-1;this.userCode=` const ivec2 pads = ivec2(${a}, ${c}); void main() { @@ -2344,8 +2252,8 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class Z5{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.dilationDepth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterDepth,h=e.effectiveFilterHeight,d=e.effectiveFilterWidth,m=c-1-e.padInfo.front,y=h-1-e.padInfo.top,b=d-1-e.padInfo.left,w=c*h*d-1;this.userCode=` - const ivec3 pads = ivec3(${m}, ${y}, ${b}); + `}}class d6{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.dilationDepth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterDepth,h=e.effectiveFilterHeight,d=e.effectiveFilterWidth,m=c-1-e.padInfo.front,f=h-1-e.padInfo.top,b=d-1-e.padInfo.left,w=c*h*d-1;this.userCode=` + const ivec3 pads = ivec3(${m}, ${f}, ${b}); void main() { ivec5 coords = getOutputCoords(); @@ -2408,26 +2316,28 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class tS{constructor(e,t,n=!1,s=!1,i=!1,o=null,a=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t;const c=n?e[1]:e[2],h=Math.ceil(c/2),d=n?"i * 2, rc.y":"rc.y, i * 2",m=s?"rc.z, i * 2":"i * 2, rc.z",y=n?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],b=s?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"];let w="",L="";o&&(a?w=`vec4 activation(vec4 a) { + `}}class xS{constructor(e,t,n,s=!1,i=!1,o=!1,a=null,c=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=n;const h=s?e[1]:e[2],d=Math.ceil(h/2),m=s?"i * 2, rc.y":"rc.y, i * 2",f=i?"rc.z, i * 2":"i * 2, rc.z",b=s?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],w=i?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"];let L="",x="";a&&(c?L=`vec4 activation(vec4 a) { vec4 b = getPreluActivationWeightsAtOutCoords(); - ${o} - }`:w=`vec4 activation(vec4 x) { - ${o} - }`,L="result = activation(result);");const T=i?"result += getBiasAtOutCoords();":"";i&&this.variableNames.push("bias"),a&&this.variableNames.push("preluActivationWeights"),this.userCode=` - ${w} + ${a} + }`:L=`vec4 activation(vec4 x) { + ${a} + }`,x="result = activation(result);");const v=o?"result += getBiasAtOutCoords();":"";o&&this.variableNames.push("bias"),c&&this.variableNames.push("preluActivationWeights");let N="rc.x",O="rc.x";e[0]{this.seedLoc==null&&(this.seedLoc=t.getUniformLocation(n,"seed")),t.gl.uniform1f(this.seedLoc,e)}}}class e8{constructor(e,t,n,s){this.variableNames=["indices"],this.outputShape=[e,t],this.userCode=` + `}getCustomSetupFunc(e){return(t,n)=>{this.seedLoc==null&&(this.seedLoc=t.getUniformLocation(n,"seed")),t.gl.uniform1f(this.seedLoc,e)}}}class m6{constructor(e,t,n,s){this.variableNames=["indices"],this.outputShape=[e,t],this.userCode=` void main() { ivec2 coords = getOutputCoords(); int index = round(getIndices(coords.x)); setOutput(mix(float(${s}), float(${n}), float(index == coords.y))); } - `}}class t8{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outputShape=e;const t=e.length;if(t===0)this.userCode=` + `}}class f6{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outputShape=e;const t=e.length;if(t===0)this.userCode=` void main() { setOutput(vec4(getA(), 0., 0., 0.)); } - `;else{const n=cs("rc",t),s=Et(t),i=s8(t,e,n),o=i8(t,e[e.length-1],e[e.length-2],n),a=r8(e,n);this.userCode=` + `;else{const n=Mn("rc",t),s=Rt(t),i=y6(t,e,n),o=b6(t,e[e.length-1],e[e.length-2],n),a=w6(e,n);this.userCode=` void main() { ${s} rc = getOutputCoords(); @@ -2487,7 +2397,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(vec4(${a})); } } - `}}}function n8(e,t){const n=[];for(let s=0;s<=1;s++)for(let i=0;i<=1;i++){let o=`${s===0?"r":"rp1"}, ${i===0?"c":"cp1"}`;for(let a=2;a ${t[0]}`;let s="";for(let i=e-2;i= ${t[i]}`,i ${t[0]}`;let s="";for(let i=e-2;i= ${t[i]}`,i= ${t}; bool rEdge = rp1 >= ${n}; - `}function r8(e,t){const n=e.length,s=n8(n,t);return n===1?`getA(rc), + `}function w6(e,t){const n=e.length,s=g6(n,t);return n===1?`getA(rc), rc + 1 >= ${e[0]} ? 0. : getA(rc + 1), 0, 0`:`getA(${s[0]}), cEdge ? 0. : getA(${s[1]}), rEdge ? 0. : getA(${s[2]}), - rEdge || cEdge ? 0. : getA(${s[3]})`}class o8{constructor(e,t,n){this.variableNames=["x"],this.outputShape=t.map((h,d)=>h[0]+e[d]+h[1]);const s=e.length,i=Et(s),o=t.map(h=>h[0]).join(","),a=t.map((h,d)=>h[0]+e[d]).join(","),c=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s);if(s===1){this.userCode=` + rEdge || cEdge ? 0. 
: getA(${s[3]})`}class L6{constructor(e,t,n){this.variableNames=["x"],this.outputShape=t.map((h,d)=>h[0]+e[d]+h[1]);const s=e.length,i=Rt(s),o=t.map(h=>h[0]).join(","),a=t.map((h,d)=>h[0]+e[d]).join(","),c=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s);if(s===1){this.userCode=` int start = ${o}; int end = ${a}; @@ -2525,14 +2435,14 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(getX(${c})); } } - `}}class a8{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map((L,T)=>L[0]+e[T]+L[1]);const s=e.length,i=Et(s),o=t.map(L=>L[0]).join(","),a=t.map((L,T)=>L[0]+e[T]).join(","),c=cs("rc",s),h=cs("source",s),d=`${c[s-1]} < ${this.outputShape[s-1]}`,m=s===1?"source":`vec2(${h.slice(-2).join()})`,y=[`${i} rc = outputLoc;`,`${c[s-1]} += 1; + `}}class S6{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map((L,x)=>L[0]+e[x]+L[1]);const s=e.length,i=Rt(s),o=t.map(L=>L[0]).join(","),a=t.map((L,x)=>L[0]+e[x]).join(","),c=Mn("rc",s),h=Mn("source",s),d=`${c[s-1]} < ${this.outputShape[s-1]}`,m=s===1?"source":`vec2(${h.slice(-2).join()})`,f=[`${i} rc = outputLoc;`,`${c[s-1]} += 1; if(${d}) { `,s===1?"":`} rc = outputLoc; ${c[s-2]} += 1; if(${c[s-2]} < ${this.outputShape[s-2]}) {`,s===1?"":` ${c[s-1]} += 1; - if(${d}) {`],b=s===1?"rc < start || rc >= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))";let w="";for(let L=0,T=s===1?2:4;L= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))";let w="";for(let L=0,x=s===1?2:4;L= ${e.inWidth}) { @@ -2741,7 +2651,7 @@ return (round(mod(b, 2.0)) != 1) ? // use the current value. float currMinMaxValue = mix( value, minMaxValue, minMaxValueFound); - if (value ${q} currMinMaxValue) { + if (value ${j} currMinMaxValue) { minMaxValue = value; minMaxValueFound = 1.0; minMaxPosition = ${s?i?`(((batch * ${e.inDepth} + xD) * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`((xD * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`wD * ${w} * ${L} + @@ -2752,17 +2662,17 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(float(minMaxPosition)); } - `;return}const F="max";let _=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="avg"&&(_="avgValue / count");const B=Math.floor(o/4)*4,$=o%4,H=` - if (${E}) { + `;return}const k="max";let F=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="avg"&&(F="avgValue / count");const U=Math.floor(o/4)*4,$=o%4,Y=` + if (${O}) { avgValue += dot(values, ones); } else { - minMaxValue = ${F}(values, minMaxValue); + minMaxValue = ${k}(values, minMaxValue); } `;this.userCode=` const ivec3 strides = ivec3(${a}, ${c}, ${h}); - const ivec3 pads = ivec3(${T}, ${A}, ${N}); - const float initializationValue = ${D}; + const ivec3 pads = ivec3(${x}, ${v}, ${N}); + const float initializationValue = ${E}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float count = 0.0; @@ -2787,7 +2697,7 @@ return (round(mod(b, 2.0)) != 1) ? // max/min x(?, ?, ?, d) to get y(yD, yR, yC, ch). // ? = to be determined - vec4 minMaxValue = vec4(${D}); + vec4 minMaxValue = vec4(${E}); float avgValue = 0.0; count = 0.0; @@ -2807,20 +2717,20 @@ return (round(mod(b, 2.0)) != 1) ? 
continue; } - for (int wC = 0; wC < ${B}; wC += 4) { - int xC = xCCorner + wC * ${y}; + for (int wC = 0; wC < ${U}; wC += 4) { + int xC = xCCorner + wC * ${f}; vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${y}, ch), - getValue(batch, xD, xR, xC + 2 * ${y}, ch), - getValue(batch, xD, xR, xC + 3 * ${y}, ch) + getValue(batch, xD, xR, xC + ${f}, ch), + getValue(batch, xD, xR, xC + 2 * ${f}, ch), + getValue(batch, xD, xR, xC + 3 * ${f}, ch) ); - ${H} + ${Y} } - int xC = xCCorner + ${B}; + int xC = xCCorner + ${U}; if (${$===1}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), @@ -2829,31 +2739,31 @@ return (round(mod(b, 2.0)) != 1) ? initializationValue ); - ${H} + ${Y} } else if (${$===2}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${y}, ch), + getValue(batch, xD, xR, xC + ${f}, ch), initializationValue, initializationValue ); - ${H} + ${Y} } else if (${$===3}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${y}, ch), - getValue(batch, xD, xR, xC + 2 * ${y}, ch), + getValue(batch, xD, xR, xC + ${f}, ch), + getValue(batch, xD, xR, xC + 2 * ${f}, ch), initializationValue ); - ${H} + ${Y} } } - setOutput(${_}); + setOutput(${F}); } } - `}}class J0{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:i,outSize:o}=e;this.outputShape=[s,o];let a="0.0",c="";t==="prod"?a="1.0":t==="min"?(a="1.0 / 1e-20",c="min"):t==="max"&&(a="-1.0 / 1e-20",c="max");let h=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="sum"?h="sumValue":t==="prod"?h="prodValue":t==="all"?h="allValue":t==="any"&&(h="anyValue");const d=Math.floor(n/4)*4,m=n%4;let y=` + `}}class yC{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:i,outSize:o}=e;this.outputShape=[s,o];let a="0.0",c="";t==="prod"?a="1.0":t==="min"?(a="1.0 / 1e-20",c="min"):t==="max"&&(a="-1.0 / 1e-20",c="max");let h=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="sum"?h="sumValue":t==="prod"?h="prodValue":t==="all"?h="allValue":t==="any"&&(h="anyValue");const d=Math.floor(n/4)*4,m=n%4;let f=` if (${t==="sum"}) { sumValue += dot(values, ones); } else if (${t==="prod"}) { @@ -2862,11 +2772,11 @@ return (round(mod(b, 2.0)) != 1) ? } else { minMaxValue = ${c}(values, minMaxValue); } - `,b="vec4";t==="all"?(a="1.0",y=` + `,b="vec4";t==="all"?(a="1.0",f=` bool reducedAllValue = all(values); float floatedReducedAllValue = float(reducedAllValue); allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0); - `,b="bvec4"):t==="any"&&(a="0.0",y=` + `,b="bvec4"):t==="any"&&(a="0.0",f=` bool reducedAnyValue = any(values); float floatedReducedAnyValue = float(reducedAnyValue); anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0); @@ -2904,7 +2814,7 @@ return (round(mod(b, 2.0)) != 1) ? getValue(batch, inIdx + 3) ); - ${y} + ${f} } int inIdx = inOffset + ${d}; @@ -2916,7 +2826,7 @@ return (round(mod(b, 2.0)) != 1) ? initializationValue ); - ${y} + ${f} } else if (${m===2}) { ${b} values = ${b}( getValue(batch, inIdx), @@ -2925,7 +2835,7 @@ return (round(mod(b, 2.0)) != 1) ? initializationValue ); - ${y} + ${f} } else if (${m===3}) { ${b} values = ${b}( getValue(batch, inIdx), @@ -2934,11 +2844,11 @@ return (round(mod(b, 2.0)) != 1) ? 
initializationValue ); - ${y} + ${f} } setOutput(${h}); } - `}}class Z0{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;let n="";for(let s=0;s<4;s++){let i="thisRC = rc;";s%2===1&&(i+="thisRC.z += 1;"),s>1&&(i+="thisRC.y += 1;"),n+=` + `}}class bC{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;let n="";for(let s=0;s<4;s++){let i="thisRC = rc;";s%2===1&&(i+="thisRC.z += 1;"),s>1&&(i+="thisRC.y += 1;"),n+=` ${i} ${s>0?"if(thisRC.y < rows && thisRC.z < cols){":""} int flatIndex = getFlatIndex(thisRC); @@ -2950,8 +2860,8 @@ return (round(mod(b, 2.0)) != 1) ? getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims); ${s>0?"}":""} `}this.userCode=` - ${c8(t)} - ${ZL(e)} + ${I6(t)} + ${IS(e)} void main() { ivec3 rc = getOutputCoords(); @@ -2966,12 +2876,12 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(result); } - `}}function c8(e){const t=Bo(["r","c","d"],e);return` + `}}function I6(e){const t=Yo(["r","c","d"],e);return` ivec3 inputCoordsFromReshapedOutCoords(int index) { ${t} return ivec3(r, c, d); } - `}class l8{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],y=1/d,b=1/m,w=Math.ceil(y)*2+2,L=Math.ceil(b)*2+2;this.userCode=` + `}class x6{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],f=1/d,b=1/m,w=Math.ceil(f)*2+2,L=Math.ceil(b)*2+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2984,7 +2894,7 @@ return (round(mod(b, 2.0)) != 1) ? const float heightScale = float(${d}); const float widthScale = float(${m}); - const float invHeightScale = float(${y}); + const float invHeightScale = float(${f}); const float invWidthScale = float(${b}); const int winHeight = int(${w}); @@ -3052,7 +2962,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(accumulator); } - `}}class h8{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` + `}}class T6{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( ${h[0]/d[0]}, ${h[1]/d[1]}); @@ -3085,7 +2995,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(newValue); } - `}}class u8{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` + `}}class A6{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` const vec3 effectiveInputOverOutputRatioRC = vec3( ${h[0]/d[0]}, ${h[1]/d[1]}, @@ -3162,7 +3072,7 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(newValue); } - `}}class d8{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],y=1/d,b=1/m,w=Math.ceil(y)*2+2,L=Math.ceil(b)*2+2;this.userCode=` + `}}class v6{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],f=1/d,b=1/m,w=Math.ceil(f)*2+2,L=Math.ceil(b)*2+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -3175,7 +3085,7 @@ return (round(mod(b, 2.0)) != 1) ? const float heightScale = float(${d}); const float widthScale = float(${m}); - const float invHeightScale = float(${y}); + const float invHeightScale = float(${f}); const float invWidthScale = float(${b}); const int winHeight = int(${w}); @@ -3232,7 +3142,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(accumulator); } - `}}class p8{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n],m=s?"0.5":"0.0";this.userCode=` + `}}class N6{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n],m=s?"0.5":"0.0";this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( ${h[0]/d[0]}, ${h[1]/d[1]}); @@ -3255,17 +3165,17 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(newValue); } - `}}class m8{constructor(e,t){this.variableNames=["x"];const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);if(this.outputShape=e,n===1){this.userCode=` + `}}class C6{constructor(e,t){this.variableNames=["x"];const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);if(this.outputShape=e,n===1){this.userCode=` void main() { int coord = getOutputCoords(); setOutput(getX(${e[0]} - coord - 1)); } - `;return}const s=a=>t.indexOf(a)!==-1&&e[a]!==1?`${e[a]} - coords[${a}] - 1`:`coords[${a}]`,i=e.map((a,c)=>s(c)).join(","),o=Et(n);this.userCode=` + `;return}const s=a=>t.indexOf(a)!==-1&&e[a]!==1?`${e[a]} - coords[${a}] - 1`:`coords[${a}]`,i=e.map((a,c)=>s(c)).join(","),o=Rt(n);this.userCode=` void main() { ${o} coords = getOutputCoords(); setOutput(getX(${i})); } - `}}class f8{constructor(e,t){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);this.outputShape=e;const s=cs("rc",n),i=`${s[n-1]} + 1 < ${this.outputShape[n-1]}`,o=`${s[n-2]} + 1 < ${this.outputShape[n-2]}`,a=Et(n);n===1?this.userCode=` + `}}class R6{constructor(e,t){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);this.outputShape=e;const s=Mn("rc",n),i=`${s[n-1]} + 1 < ${this.outputShape[n-1]}`,o=`${s[n-2]} + 1 < ${this.outputShape[n-2]}`,a=Rt(n);n===1?this.userCode=` void main(){ int rc = getOutputCoords(); vec4 result = vec4(0.); @@ -3293,7 +3203,7 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(result); } - `;function c(w){return y(w)}function h(w){return w[n-1]="("+w[n-1]+" + 1)",y(w)}function d(w){return w[n-2]="("+w[n-2]+" + 1)",y(w)}function m(w){return w[n-1]="("+w[n-1]+" + 1)",w[n-2]="("+w[n-2]+" + 1)",y(w)}function y(w){const L=e.map((N,E)=>b(E,w)),T=L.join(","),A=L.slice(-2).join(",");return`getChannel(getX(${T}), vec2(${A}))`}function b(w,L){return t.indexOf(w)!==-1&&e[w]!==1?`${e[w]} - ${L[w]} - 1`:`${L[w]}`}}}class Q0{constructor(e,t,n,s,i,o,a=!0){this.variableNames=["updates","indices","defaultValue"],this.outputShape=o;const c=Et(i.length),h=Et(o.length);let d="";n===1?d="i":n===2&&(d="i, j");const m=`getIndices(${d})`;let y="";s===1?y="i":s===2&&(y="i, coords[1]");const b=`getUpdates(${y})`,w=t>1?"strides[j]":"strides";this.userCode=` + `;function c(w){return f(w)}function h(w){return w[n-1]="("+w[n-1]+" + 1)",f(w)}function d(w){return w[n-2]="("+w[n-2]+" + 1)",f(w)}function m(w){return w[n-1]="("+w[n-1]+" + 1)",w[n-2]="("+w[n-2]+" + 1)",f(w)}function f(w){const L=e.map((N,O)=>b(O,w)),x=L.join(","),v=L.slice(-2).join(",");return`getChannel(getX(${x}), vec2(${v}))`}function b(w,L){return t.indexOf(w)!==-1&&e[w]!==1?`${e[w]} - ${L[w]} - 1`:`${L[w]}`}}}class wC{constructor(e,t,n,s,i,o,a=!0){this.variableNames=["updates","indices","defaultValue"],this.outputShape=o;const c=Rt(i.length),h=Rt(o.length);let d="";n===1?d="i":n===2&&(d="i, j");const m=`getIndices(${d})`;let f="";s===1?f="i":s===2&&(f="i, coords[1]");const b=`getUpdates(${f})`,w=t>1?"strides[j]":"strides";this.userCode=` ${c} strides = ${c}(${i}); void main() { @@ -3313,7 +3223,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(mix(getDefaultValue(), sum, float(found))); } - `}}class g8{constructor(e,t){this.variableNames=["x","segmentIds"];const n=e.windowSize,s=e.batchSize,i=e.inSize,o=e.numSegments,a=o*Math.ceil(i/n);this.outputShape=[s,a];const c="0.0",h="sumValue",d=Math.floor(n/4)*4,m=n%4,y=` + `}}class O6{constructor(e,t){this.variableNames=["x","segmentIds"];const n=e.windowSize,s=e.batchSize,i=e.inSize,o=e.numSegments,a=o*Math.ceil(i/n);this.outputShape=[s,a];const c="0.0",h="sumValue",d=Math.floor(n/4)*4,m=n%4,f=` sumValue += dot(values, segFilter); `;let b="";i%n>0&&(b=` if (inIdx < 0 || inIdx >= ${i}) { @@ -3362,7 +3272,7 @@ return (round(mod(b, 2.0)) != 1) ? int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0 ); - ${y} + ${f} } int inIdx = inOffset + ${d}; @@ -3383,7 +3293,7 @@ return (round(mod(b, 2.0)) != 1) ? 0 ); - ${y} + ${f} } else if (${m===2}) { vec4 values = vec4( getValue(batch, inIdx), @@ -3399,7 +3309,7 @@ return (round(mod(b, 2.0)) != 1) ? 0 ); - ${y} + ${f} } else if (${m===3}) { vec4 values = vec4( getValue(batch, inIdx), @@ -3415,11 +3325,11 @@ return (round(mod(b, 2.0)) != 1) ? 
0 ); - ${y} + ${f} } setOutput(${h}); } - `}}class y8{constructor(e,t,n){this.variableNames=["c","a","b"],this.outputShape=t;let s,i;if(n>4)throw Error(`Where for rank ${n} is not yet supported`);if(n===1)i="resRC",s="resRC";else{const a=["resRC.x","resRC.y","resRC.z","resRC.w"],c=[],h=[];for(let d=0;d4)throw Error(`Where for rank ${n} is not yet supported`);if(n===1)i="resRC",s="resRC";else{const a=["resRC.x","resRC.y","resRC.z","resRC.w"],c=[],h=[];for(let d=0;d`sourceLoc.${sS[c]} = start[${c}] + coords.${sS[c]};`);i=` + `}}class D6{constructor(e){this.variableNames=["source"],this.outputShape=e,this.rank=e.length;const t=Rt(this.rank),n=`uniform int start[${this.rank}];`,s=k6(this.rank);let i;const o=e.map((a,c)=>`sourceLoc.${AS[c]} = start[${c}] + coords.${AS[c]};`);i=` ${t} sourceLoc; ${t} coords = getOutputCoords(); ${o.join(` @@ -3440,7 +3350,7 @@ return (round(mod(b, 2.0)) != 1) ? ${i} setOutput(getSource(${s})); } - `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}const sS=["x","y","z","w","u","v"];function w8(e){if(e===1)return"sourceLoc";if(e<=6)return sS.slice(0,e).map(t=>"sourceLoc."+t).join(",");throw Error(`Slicing for rank ${e} is not yet supported`)}class L8{constructor(e){this.variableNames=["source"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.rank=e.length;const t=Et(this.rank),n=cs("coords",this.rank),s=cs("sourceLoc",this.rank),i=this.rank===1?"sourceLoc":`vec2(${s.slice(-2).join()})`,o=`getChannel(getSource(${s.join()}), ${i})`,a=` + `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}const AS=["x","y","z","w","u","v"];function k6(e){if(e===1)return"sourceLoc";if(e<=6)return AS.slice(0,e).map(t=>"sourceLoc."+t).join(",");throw Error(`Slicing for rank ${e} is not yet supported`)}class F6{constructor(e){this.variableNames=["source"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.rank=e.length;const t=Rt(this.rank),n=Mn("coords",this.rank),s=Mn("sourceLoc",this.rank),i=this.rank===1?"sourceLoc":`vec2(${s.slice(-2).join()})`,o=`getChannel(getSource(${s.join()}), ${i})`,a=` result.x = ${o}; if (++${n[this.rank-1]} < ${e[this.rank-1]}) { ++${s[this.rank-1]}; @@ -3470,7 +3380,7 @@ return (round(mod(b, 2.0)) != 1) ? 
${c} setOutput(result); } - `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}class S8{constructor(e,t,n){this.variableNames=["x"],this.outputShape=n;const s=n.length,i=Et(n.length),o=Et(n.length);let a="";if(s===1)a="coords * strides + begin";else{let c=0;a=n.map((h,d)=>(c++,n.length===1?`coords * strides[${d}] + begin[${d}]`:`coords[${c-1}] * strides[${d}] + begin[${d}]`)).join(",")}this.userCode=` + `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}class _6{constructor(e,t,n){this.variableNames=["x"],this.outputShape=n;const s=n.length,i=Rt(n.length),o=Rt(n.length);let a="";if(s===1)a="coords * strides + begin";else{let c=0;a=n.map((h,d)=>(c++,n.length===1?`coords * strides[${d}] + begin[${d}]`:`coords[${c-1}] * strides[${d}] + begin[${d}]`)).join(",")}this.userCode=` ${i} begin = ${i}(${e}); ${i} strides = ${i}(${t}); @@ -3478,12 +3388,12 @@ return (round(mod(b, 2.0)) != 1) ? ${o} coords = getOutputCoords(); setOutput(getX(${a})); } - `}}class I8{constructor(e){this.gpgpu=e,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0,this.freeTextures={},this.logEnabled=!1,this.usedTextures={}}acquireTexture(e,t,n){const s=tC(t,n),i=nC(e,s,n);i in this.freeTextures||(this.freeTextures[i]=[]),i in this.usedTextures||(this.usedTextures[i]=[]);const o=eC(e,s,this.gpgpu.gl,this.gpgpu.textureConfig,n);if(this.freeTextures[i].length>0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();const c=this.freeTextures[i].shift();return this.usedTextures[i].push(c),c}let a;return s===Sn.PACKED_2X2_FLOAT32?a=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===Sn.PACKED_2X2_FLOAT16?a=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===Sn.UNPACKED_FLOAT32?a=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===Sn.UNPACKED_FLOAT16?a=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===Sn.PACKED_4X1_UNSIGNED_BYTE&&(a=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[i].push(a),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),a}releaseTexture(e,t,n,s){if(this.freeTextures==null)return;const i=tC(n,s),o=nC(t,i,s);o in this.freeTextures||(this.freeTextures[o]=[]);const a=eC(t,i,this.gpgpu.gl,this.gpgpu.textureConfig,s),c=C().get("WEBGL_DELETE_TEXTURE_THRESHOLD");c!==-1&&this._numBytesAllocated>c?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=a):(this.freeTextures[o].push(e),this.numFreeTextures++,this._numBytesFree+=a),this.numUsedTextures--;const h=this.usedTextures[o],d=h.indexOf(e);if(d<0)throw new Error("Cannot release a texture that was never provided by this texture manager");h.splice(d,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return 
this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(const e in this.freeTextures)this.freeTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});for(const e in this.usedTextures)this.usedTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function x8(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function eC(e,t,n,s,i){const o=T8(t,s);let a;if(i){const[h,d]=sc(e[0],e[1]);a=h*d}else{const[h,d]=jh(e[0],e[1]);a=h*d}const c=x8(n,o);return a*c}function T8(e,t){switch(e){case Sn.PACKED_2X2_FLOAT32:return j0(t);case Sn.PACKED_2X2_FLOAT16:return K0(t);case Sn.UNPACKED_FLOAT32:return Y0(t);case Sn.UNPACKED_FLOAT16:return H0(t);case Sn.PACKED_4X1_UNSIGNED_BYTE:return q0(t);default:throw new Error(`Unknown physical texture type ${e}`)}}function A8(e){return C().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?Sn.PACKED_2X2_FLOAT32:Sn.UNPACKED_FLOAT32:e?Sn.PACKED_2X2_FLOAT16:Sn.UNPACKED_FLOAT16}function tC(e,t){if(e===As.UPLOAD)return Sn.PACKED_2X2_FLOAT32;if(e===As.RENDER||e==null)return A8(t);if(e===As.DOWNLOAD||e===As.PIXELS)return Sn.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function nC(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class v8{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();const c=this.freeTextures[i].shift();return this.usedTextures[i].push(c),c}let a;return s===Cn.PACKED_2X2_FLOAT32?a=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===Cn.PACKED_2X2_FLOAT16?a=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===Cn.UNPACKED_FLOAT32?a=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===Cn.UNPACKED_FLOAT16?a=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===Cn.PACKED_4X1_UNSIGNED_BYTE&&(a=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[i].push(a),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),a}releaseTexture(e,t,n,s){if(this.freeTextures==null)return;const i=SC(n,s),o=IC(t,i,s);o in this.freeTextures||(this.freeTextures[o]=[]);const a=LC(t,i,this.gpgpu.gl,this.gpgpu.textureConfig,s),c=oe().get("WEBGL_DELETE_TEXTURE_THRESHOLD");c!==-1&&this._numBytesAllocated>c?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=a):(this.freeTextures[o].push(e),this.numFreeTextures++,this._numBytesFree+=a),this.numUsedTextures--;const h=this.usedTextures[o],d=h.indexOf(e);if(d<0)throw new Error("Cannot release a texture that was never provided by this texture manager");h.splice(d,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return 
this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(const e in this.freeTextures)this.freeTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});for(const e in this.usedTextures)this.usedTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function $6(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function LC(e,t,n,s,i){const o=U6(t,s);let a;if(i){const[h,d]=pc(e[0],e[1]);a=h*d}else{const[h,d]=hu(e[0],e[1]);a=h*d}const c=$6(n,o);return a*c}function U6(e,t){switch(e){case Cn.PACKED_2X2_FLOAT32:return mC(t);case Cn.PACKED_2X2_FLOAT16:return fC(t);case Cn.UNPACKED_FLOAT32:return uC(t);case Cn.UNPACKED_FLOAT16:return dC(t);case Cn.PACKED_4X1_UNSIGNED_BYTE:return pC(t);default:throw new Error(`Unknown physical texture type ${e}`)}}function B6(e){return oe().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?Cn.PACKED_2X2_FLOAT32:Cn.UNPACKED_FLOAT32:e?Cn.PACKED_2X2_FLOAT16:Cn.UNPACKED_FLOAT16}function SC(e,t){if(e===Ns.UPLOAD)return Cn.PACKED_2X2_FLOAT32;if(e===Ns.RENDER||e==null)return B6(t);if(e===Ns.DOWNLOAD||e===Ns.PIXELS)return Cn.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function IC(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class M6{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o5)throw Error(`Tile for rank ${t} is not yet supported`);if(t===1)return`imod(resRC, ${e[0]})`;const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],s=[];for(let i=0;i5)throw Error(`Tile for rank ${t} is not yet supported`);if(t===1)return`imod(resRC, ${e[0]})`;const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],s=[];for(let i=0;i= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0); -`;function O8(e=0){return or+` +`;function G6(e=0){return hr+` return x > 0.0 ? 1.0 : float(${e}); - `}const aC="return -x;",cC="return ceil(x);",lC="return floor(x);",E8=` + `}const NC="return -x;",CC="return ceil(x);",RC="return floor(x);",Y6=` if (isnan(x)) { return 0.0; } return sign(x); -`,D8="return float(isnan(x));",k8="return float(isinf(x));",F8="return float(!isnan(x) && !isinf(x));",_8=` +`,H6="return float(isnan(x));",q6="return float(isinf(x));",j6="return float(!isnan(x) && !isinf(x));",K6=` // OpenGL ES does not support round function. // The algorithm is based on banker's rounding. float base = floor(x); @@ -3524,8 +3434,8 @@ return (round(mod(b, 2.0)) != 1) ? return base + 1.0; } } -`,hC="return exp(x);",uC="return exp(x) - 1.0;",W8=`if (x < 0.0) return NAN; - return log(x);`,$8="return log(1.0 + x);",U8="return sqrt(x);",B8="return inversesqrt(x);",M8="return 1.0 / (1.0 + exp(-1.0 * x));",P8=` +`,OC="return exp(x);",EC="return exp(x) - 1.0;",X6=`if (x < 0.0) return NAN; + return log(x);`,J6="return log(1.0 + x);",Z6="return sqrt(x);",Q6="return inversesqrt(x);",eX="return 1.0 / (1.0 + exp(-1.0 * x));",tX=` float epsilon = 1.1920928955078125e-7; float threshold = log(epsilon) + 2.0; @@ -3545,47 +3455,47 @@ return (round(mod(b, 2.0)) != 1) ? result = log(exp_x + 1.0); } return result; -`,z8=or+` +`,nX=hr+` if (abs(x) > 1.) { return NAN; } return asin(x); -`,G8=or+` +`,sX=hr+` if (abs(x) > 1.) 
{ return NAN; } return acos(x); -`,V8=or+` +`,iX=hr+` return atan(x); -`,Y8=` +`,rX=` float e2x = exp(x); return (e2x - 1.0 / e2x) / 2.0; -`,H8=` +`,oX=` float e2x = exp(-x); return (e2x + 1.0 / e2x) / 2.0; -`,q8=` +`,aX=` float e2x = exp(-2.0 * abs(x)); return sign(x) * (1.0 - e2x) / (1.0 + e2x); -`,j8=or+"return log(x + sqrt(x * x + 1.0));",K8=or+` +`,cX=hr+"return log(x + sqrt(x * x + 1.0));",lX=hr+` if (x < 1.0) return NAN; - return log(x + sqrt(x * x - 1.0));`,X8=or+` + return log(x + sqrt(x * x - 1.0));`,hX=hr+` if ((x < -1.0) || (x > 1.0)) return NAN; - return (log(1.0 + x) - log(1.0 - x)) / 2.0;`,J8=` + return (log(1.0 + x) - log(1.0 - x)) / 2.0;`,uX=` // Error function is calculated approximately with elementary function. // See "Handbook of Mathematical Functions with Formulas, // Graphs, and Mathematical Tables", Abramowitz and Stegun. - float p = ${Eb}; - float a1 = ${Db}; - float a2 = ${kb}; - float a3 = ${Fb}; - float a4 = ${_b}; - float a5 = ${Wb}; + float p = ${ew}; + float a1 = ${tw}; + float a2 = ${nw}; + float a3 = ${sw}; + float a4 = ${iw}; + float a5 = ${rw}; float sign = sign(x); x = abs(x); float t = 1.0 / (1.0 + p * x); return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x)); -`,Z8="return 1.0 / x;",Q8="return float(!(x >= 1.0));",e6="return float(int(x));",im="return x;";const t6="return x;",n6=` +`,dX="return 1.0 / x;",pX="return float(!(x >= 1.0));",Fm="return x;";const mX="return x;",fX=` vec4 result = log(x); vec4 isNaN = vec4(lessThan(x, vec4(0.0))); result.r = isNaN.r == 1.0 ? NAN : result.r; @@ -3594,7 +3504,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isNaN.a == 1.0 ? NAN : result.a; return result; -`,dC=` +`,DC=` vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); @@ -3604,7 +3514,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isNaN.a ? x.a : result.a; return result; -`,pC=` +`,kC=` vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); @@ -3614,7 +3524,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isNaN.a ? x.a : result.a; return result; -`,mC=` +`,FC=` vec4 result; result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); @@ -3623,7 +3533,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); return result; -`;class Qh{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` +`;class fu{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` vec4 unaryOperation(vec4 x) { ${t} } @@ -3634,29 +3544,29 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(y); } - `}}class s6{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=e;const t=e.length,n=cs("rc",t),s=Et(t),i=Pj(t,n),o=n.slice(-2),a=t<=1?"rc":`vec2(${o.join(",")})`;this.userCode=` + `}}class gX{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=e;const t=e.length,n=Mn("rc",t),s=Rt(t),i=r5(t,n),o=n.slice(-2),a=t<=1?"rc":`vec2(${o.join(",")})`;this.userCode=` void main() { ${s} rc = getOutputCoords(); vec4 packedInput = getA(${i}); setOutput(getChannel(packedInput, ${a})); } - `}}const{segment_util:fC}=Bb,i6=Mb,r6=Pb,o6=zb,a6=jd,c6=1e-7,l6=1e-4,rm={};function h6(e){return e in rm||(rm[e]={}),rm[e]}function om(e,t=!1){if(e==="linear")return t?t6:C8;if(e==="relu")return t?dC:iC;if(e==="elu")return t?mC:oC;if(e==="relu6")return t?pC:rC;if(e==="prelu")return t?W0:_0;throw new Error(`Activation ${e} has not been implemented for the WebGL backend.`)}const u6=128,d6=600;function p6(){return C().global.screen==null?1024:C().global.screen.height*C().global.screen.width*window.devicePixelRatio*d6/1024/1024}const gC=1e3;class m6 extends g{constructor(e){super();if(this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!C().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(e==null){const t=_i(C().getNumber("WEBGL_VERSION"));this.binaryCache=h6(C().getNumber("WEBGL_VERSION")),this.gpgpu=new z5(t),this.canvas=t.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=e,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=e.gl.canvas;this.textureManager=new I8(this.gpgpu),this.numMBBeforeWarning=p6(),this.texData=new p(this,Fs())}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(e,t,n){if((C().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||C().getBool("DEBUG"))&&this.checkNumericalProblems(e),n==="complex64"&&e!=null)throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");const s={};return this.texData.set(s,{shape:t,dtype:n,values:e,usage:As.UPLOAD,refCount:1}),s}incRef(e){const t=this.texData.get(e);t.refCount++}decRef(e){if(this.texData.has(e)){const t=this.texData.get(e);t.refCount--}}move(e,t,n,s){if(C().getBool("DEBUG")&&this.checkNumericalProblems(t),s==="complex64")throw new Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");this.texData.set(e,{shape:n,dtype:s,values:t,usage:As.UPLOAD,refCount:1})}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.texData.has(t)){const n=this.texData.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}readSync(e){const t=this.texData.get(e),{values:n,dtype:s,complexTensors:i,slice:o,shape:a,isPacked:c}=t;if(o!=null){let y;c?y=new Qh(a,im):y=new st(a,im);const b=this.runWebGLProgram(y,[{dataId:e,shape:a,dtype:s}],s),w=this.readSync(b.dataId);return this.disposeIntermediateTensorInfo(b),w}if(n!=null)return this.convertAndCacheOnCPU(e);if(s==="string")return n;const h=this.activeTimers!=null;let d;h&&(d=Vn());let m;if(s==="complex64"){const y=i.real.dataSync(),b=i.imag.dataSync();m=Zi(y,b)}else m=this.getValuesFromTexture(e);return h&&(this.downloadWaitMs+=Vn()-d),this.convertAndCacheOnCPU(e,m)}async read(e){if(this.pendingRead.has(e)){const w=this.pendingRead.get(e);return new Promise(L=>w.push(L))}const t=this.texData.get(e),{values:n,shape:s,slice:i,dtype:o,complexTensors:a,isPacked:c}=t;if(i!=null){let w;c?w=new Qh(s,im):w=new st(s,im);const L=this.runWebGLProgram(w,[{dataId:e,shape:s,dtype:o}],o),T=this.read(L.dataId);return this.disposeIntermediateTensorInfo(L),T}if(n!=null)return this.convertAndCacheOnCPU(e);if(!C().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&C().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let h=null,d;if(o!=="complex64"&&C().get("WEBGL_BUFFER_SUPPORTED")){d=this.decode(e);const w=this.texData.get(d.dataId);h=this.gpgpu.createBufferFromTexture(w.texture,...Kh(s))}this.pendingRead.set(e,[]),o!=="complex64"&&await this.gpgpu.createAndWaitForFence();let m;if(o==="complex64"){const w=await Promise.all([a.real.data(),a.imag.data()]),L=w[0],T=w[1];m=Zi(L,T)}else if(h==null)m=this.getValuesFromTexture(e);else{const w=we(s);m=this.gpgpu.downloadFloat32MatrixFromBuffer(h,w)}d!=null&&this.disposeIntermediateTensorInfo(d);const y=this.convertAndCacheOnCPU(e,m),b=this.pendingRead.get(e);return this.pendingRead.delete(e),b.forEach(w=>w(y)),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e),this.pendingDeletes--),y}checkNumericalProblems(e){if(e==null)return;for(let t=0;tc.query)).filter(c=>c!=null),o=Yi(this.activeTimers.map(c=>c.name)).filter(c=>c!=null);this.activeTimers=t,s&&(this.programTimersStack=null);const a={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const c=await Promise.all(i);a.kernelMs=Ox(c),a.getExtraProfileInfo=()=>c.map((h,d)=>({name:o[d],ms:h})).map(h=>`${h.name}: ${h.ms}`).join(", ")}else a.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,a}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:Vn(),endMs:null}}endTimer(e){return C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=Vn(),e)}async getQueryTime(e){if(C().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const t=e;return 
t.endMs-t.startMs}disposeData(e){if(this.pendingDisposal.has(e))return;if(this.pendingRead.has(e)){this.pendingDisposal.add(e),this.pendingDeletes++;return}if(!this.texData.has(e))return;this.releaseGPUData(e);const{complexTensors:t}=this.texData.get(e);t!=null&&(t.real.dispose(),t.imag.dispose()),this.texData.delete(e)}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:i,isPacked:o,slice:a}=this.texData.get(e),c=a&&a.origDataId||e,h=this.dataRefCount.get(c);h>1?this.dataRefCount.set(c,h-1):(this.dataRefCount.delete(c),t!=null&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,i,o)));const d=this.texData.get(e);d.texture=null,d.texShape=null,d.isPacked=!1,d.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return C().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=Fs().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=u6){const n=this.getCPUBackend();return!this.warnedAboutCPUBackend&&n==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),n!=null&&e.every(s=>this.texData.get(s.dataId).texture==null&&we(s.shape)this.cpuBackend.stridedSlice(e,t,n,s));if(i)return i;const o=bd(t,n,s);if(o.some(c=>c===0))return en([],o);const a=new S8(t,s,o);return this.compileAndRun(a,[e])}reverse(e,t){const n=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new f8(e.shape,t):new m8(e.shape,t);return this.compileAndRun(n,[e])}concat(e,t){if(e[0].dtype==="complex64"){const a=e.map(h=>xo(h)),c=e.map(h=>Ea(h));return xi(this.concat(a,t),this.concat(c,t))}if(e.length===1)return e[0];if(e.length>C().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const a=Math.floor(e.length/2),c=this.concat(e.slice(0,a),t),h=this.concat(e.slice(a),t);return this.concat([c,h],t)}if(C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].rank>1){const a=new r5(e.map(c=>c.shape),t);return this.compileAndRun(a,e)}const n=Or(e.map(a=>a.shape),t),s=e.map(a=>a.as2D(-1,we(a.shape.slice(t)))),i=new i5(s.map(a=>a.shape)),o=this.compileAndRun(i,s);return o.reshape(n)}neg(e){const t=this.tryRunOnCpuOrThrow([e],()=>this.cpuBackend.neg(e));if(t)return t;if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,aC,e.dtype);const n=new st(e.shape,aC);return this.compileAndRun(n,[e])}batchMatMul(e,t,n,s){const i=n?e.shape[2]:e.shape[1],o=s?t.shape[1]:t.shape[2],a=n?e.shape[1]:e.shape[2],[c,,]=e.shape;if((i===1||o===1)&&a>gC){n&&(e=Me(e,[0,2,1])),s&&(t=Me(t,[0,2,1]));const m=o===1?e:e.as3D(c,a,1),y=o===1?2:1,b=o===1?t.as3D(c,1,a):t;return this.multiply(m,b).sum(y,!0)}const h=vn(e.dtype,t.dtype),d=new tS(e.shape,[c,i,o],n,s);return this.compileAndRun(d,[e,t],h)}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){const c=n?e.shape[2]:e.shape[1],h=s?t.shape[1]:t.shape[2],[d,,]=e.shape,m=vn(e.dtype,t.dtype),y=i!=null,b=a!=null,w=o?om(o,!0):null,L=new tS(e.shape,[d,c,h],n,s,y,w,b),T=[e,t];return i&&T.push(i),a&&T.push(a),this.compileAndRun(L,T,m)}multiply(e,t){if(e.dtype==="complex64"){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),a=new D0(E0.REAL,e.shape,t.shape),c=new 
D0(E0.IMAG,e.shape,t.shape),h=[this.makeComplexComponentTensorInfo(e,i.complexTensors.real),this.makeComplexComponentTensorInfo(e,i.complexTensors.imag),this.makeComplexComponentTensorInfo(t,o.complexTensors.real),this.makeComplexComponentTensorInfo(t,o.complexTensors.imag)],d=this.compileAndRun(a,h),m=this.compileAndRun(c,h),y=this.complex(d,m);return d.dispose(),m.dispose(),y}const n=vn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=kj(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,F0,e.dtype);const s=new hn(F0,e.shape,t.shape);return this.compileAndRun(s,[e,t],e.dtype)}localResponseNormalization4D(e,t,n,s,i){const o=C().getBool("WEBGL_PACK_NORMALIZATION")?new X5(e.shape,t,n,s,i):new j5(e.shape,t,n,s,i);return this.compileAndRun(o,[e])}LRNGrad(e,t,n,s,i,o,a){const c=new K5(t.shape,s,i,o,a);return this.compileAndRun(c,[t,n,e])}tile(e,t){if(e.dtype==="string"){const s=this.readSync(e.dataId),i=s.map(a=>Dl(a)),o=Ze(e.shape,e.dtype,i);return r6(o,t)}const n=new v8(e.shape,t);return this.compileAndRun(n,[e])}pad(e,t,n){const s=C().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new a8(e.shape,t,n):new o8(e.shape,t,n);return this.compileAndRun(s,[e])}gather(e,t,n){const s=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.gather(e,t,n));if(s)return s;const i=new x5(e.shape,t.size,n);return this.compileAndRun(i,[e,t])}batchToSpaceND(e,t,n){k(e.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((d,m)=>d*m),i=fh(e.shape,t,s),o=gh(i.length,t.length),a=yh(e.shape,t,s),c=Rb(n,t.length),h=Ob(a,n,t.length);return Me(e.reshape(i),o).reshape(a).slice(c,h)}spaceToBatchND(e,t,n){k(e.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((m,y)=>m*y),i=[[0,0]];i.push(...n);for(let m=1+t.length;mthis.cpuBackend.prod(e,t));if(n)return n;const[s,i]=Cn(e.shape,t),o=we(i),a=e.as2D(-1,o),c=cd(e.dtype);return this.reduce(a,"prod",c).reshape(s)}unsortedSegmentSum(e,t,n){let s=0;const i=kn([s],e.rank);let o=e;i!=null&&(o=Me(e,i),s=ws(1,e.rank)[0]);const a=fC.computeOutShape(o.shape,s,n),c=we([o.shape[s]]),h=o.as2D(-1,c),d=cd(e.dtype);let m=this.segOpCompute(h,"unsortedSegmentSum",t,d,n).reshape(a);return i!=null&&(m=Me(m,Ml(i))),m}segOpCompute(e,t,n,s,i){const o=e.shape[0],a=e.shape[1],c=fC.segOpComputeOptimalWindowSize(a,i),h={windowSize:c,inSize:a,batchSize:o,numSegments:i},d=new g8(h,t),m=this.compileAndRun(d,[e,n],s);return m.shape[1]===i?m:(n=sh(0,i).tile([a/c]),this.segOpCompute(m,t,n,s,i))}argMinMaxReduce(e,t,n){const s=[t];if(es("arg"+n.charAt(0).toUpperCase()+n.slice(1),s,e.rank),!C().getBool("WEBGL_PACK_REDUCE")||e.rank<=2){const[i,o]=Cn(e.shape,s),a=we(o),c=e.as2D(-1,a);return this.argReduce(c,n).reshape(i)}return this.argReducePacked(e,n)}argMin(e,t){return this.argMinMaxReduce(e,t,"min")}argMax(e,t){return this.argMinMaxReduce(e,t,"max")}cumsum(e,t,n,s){if(t!==e.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=e.shape[t];let o=e;for(let a=0;a<=Math.ceil(Math.log2(i))-1;a++){const c=new M0(e.shape,!1,s),h=c.getCustomSetupFunc(a),d=o;o=this.compileAndRun(c,[o],o.dtype,h),d.dispose()}if(n){const a=new M0(e.shape,n,s),c=o;o=this.compileAndRun(a,[o]),c.dispose()}return o}equal(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,VK,"bool");const n=new 
hn(RK,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}notEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,YK,"bool");const n=new hn(OK,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}less(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.less(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,HK,"bool");const s=new hn(EK,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}lessEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,qK,"bool");const n=new hn(DK,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}greater(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.greater(e,t));if(n)return n;if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,jK,"bool");const s=new hn(kK,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}greaterEqual(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,KK,"bool");const n=new hn(FK,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalNot(e){const t=new st(e.shape,Q8);return this.compileAndRun(t,[e])}logicalAnd(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,XK,"bool");const n=new hn(_K,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalOr(e,t){if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,JK,"bool");const n=new hn(WK,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}select(e,t,n){const s=new y8(e.rank,t.shape,t.rank);return this.compileAndRun(s,[e,t,n],vn(t.dtype,n.dtype))}where(e){Pa("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");const t=e.dataSync();return a6(e.shape,t)}topk(e,t,n){const s=e.dataSync();return o6(s,e.shape,e.dtype,t,n)}min(e,t){es("min",t,e.rank);const[n,s]=Cn(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"min",o.dtype).reshape(n)}minimum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.minimum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(QK,e.shape,t.shape):new hn(UK,e.shape,t.shape);return this.compileAndRun(s,[e,t])}mod(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(e5,e.shape,t.shape):new hn(BK,e.shape,t.shape);return this.compileAndRun(n,[e,t])}maximum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.maximum(e,t));if(n)return n;const s=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(ZK,e.shape,t.shape):new hn($K,e.shape,t.shape);return this.compileAndRun(s,[e,t])}all(e,t){es("all",t,e.rank);const[n,s]=Cn(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"all",o.dtype).reshape(n)}any(e,t){es("any",t,e.rank);const[n,s]=Cn(e.shape,t),i=we(s),o=e.as2D(-1,i);return this.reduce(o,"any",o.dtype).reshape(n)}floorDiv(e,t){const n=NK,s="int32";if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,PK,s);const i=new hn(n,e.shape,t.shape);return this.compileAndRun(i,[e,t],s)}add(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,QL);const n=vn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=vj(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,QL,n);const s=new hn(QL,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}packedUnaryOp(e,t,n){const 
s=new Qh(e.shape,t);return this.compileAndRun(s,[e],n)}packedBinaryOp(e,t,n,s,i=!1){const o=new qr(n,e.shape,t.shape,i);return this.compileAndRun(o,[e,t],s)}complexSeparableBinaryOp(e,t,n){const s=this.texData.get(e.dataId),i=this.texData.get(t.dataId),[o,a]=[[s.complexTensors.real,i.complexTensors.real],[s.complexTensors.imag,i.complexTensors.imag]].map(h=>{const[d,m]=h,y=this.makeComplexComponentTensorInfo(e,d),b=this.makeComplexComponentTensorInfo(t,m),w=new hn(n,e.shape,t.shape);return this.compileAndRun(w,[y,b],vn(d.dtype,m.dtype))}),c=this.complex(o,a);return o.dispose(),a.dispose(),c}makeComplexComponentTensorInfo(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}addN(e){if(e.length===1)return e[0];if(e.length>C().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const o=Math.floor(e.length/2),a=this.addN(e.slice(0,o)),c=this.addN(e.slice(o));return this.addN([a,c])}const t=e.map(o=>o.dtype).reduce((o,a)=>vn(o,a)),n=e.map(o=>o.shape),s=C().getBool("WEBGL_PACK"),i=s?new Bj(e[0].shape,n):new Uj(e[0].shape,n);return this.compileAndRun(i,e,t)}subtract(e,t){if(e.dtype==="complex64"&&t.dtype==="complex64")return this.complexSeparableBinaryOp(e,t,eS);const n=vn(e.dtype,t.dtype);if(this.shouldExecuteOnCPU([e,t])){const i=this.texData.get(e.dataId),o=this.texData.get(t.dataId),[a,c]=Wj(e.shape,t.shape,i.values,o.values,n);return this.makeOutput(c,n,a)}if(C().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,eS,e.dtype);const s=new hn(eS,e.shape,t.shape);return this.compileAndRun(s,[e,t],n)}pow(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS"),s=n?new qr(zK,e.shape,t.shape):new hn(CK,e.shape,t.shape),i=vn(e.dtype,t.dtype);return this.compileAndRun(s,[e,t],i)}ceil(e){if(this.shouldExecuteOnCPU([e])){const n=Nj(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,cC,e.dtype);const t=new st(e.shape,cC);return this.compileAndRun(t,[e])}floor(e){if(this.shouldExecuteOnCPU([e])){const n=Oj(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,lC,e.dtype);const t=new st(e.shape,lC);return this.compileAndRun(t,[e])}sign(e){const t=new st(e.shape,E8);return this.compileAndRun(t,[e])}isNaN(e){const t=new st(e.shape,D8);return this.compileAndRun(t,[e],"bool")}isInf(e){const t=new st(e.shape,k8);return this.compileAndRun(t,[e],"bool")}isFinite(e){const t=new st(e.shape,F8);return this.compileAndRun(t,[e],"bool")}round(e){const t=new st(e.shape,_8);return this.compileAndRun(t,[e])}exp(e){if(this.shouldExecuteOnCPU([e])){const n=Cj(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,hC,e.dtype);const t=new st(e.shape,hC);return this.compileAndRun(t,[e])}expm1(e){if(this.shouldExecuteOnCPU([e])){const n=Rj(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,uC,e.dtype);const t=new st(e.shape,uC);return this.compileAndRun(t,[e])}softmax(e,t){const n=ft([t],e.shape),s=qn(e,n),i=Rn(s.shape,n),o=this.subtract(e,s.reshape(i)),a=this.exp(o),c=this.sum(a,n).reshape(i);return _e(a,c)}log(e){if(this.shouldExecuteOnCPU([e])){const n=Ej(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return 
this.packedUnaryOp(e,n6,e.dtype);const t=new st(e.shape,W8);return this.compileAndRun(t,[e])}log1p(e){const t=new st(e.shape,$8);return this.compileAndRun(t,[e])}sqrt(e){const t=new st(e.shape,U8);return this.compileAndRun(t,[e])}rsqrt(e){if(this.shouldExecuteOnCPU([e])){const n=Fj(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}const t=new st(e.shape,B8);return this.compileAndRun(t,[e])}reciprocal(e){const t=new st(e.shape,Z8);return this.compileAndRun(t,[e])}relu(e){let t;return C().getBool("WEBGL_PACK")?t=new Qh(e.shape,dC):t=new st(e.shape,iC),this.compileAndRun(t,[e])}relu6(e){let t;return C().getBool("WEBGL_PACK")?t=new Qh(e.shape,pC):t=new st(e.shape,rC),this.compileAndRun(t,[e])}prelu(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(W0,e.shape,t.shape):new hn(_0,e.shape,t.shape);return this.compileAndRun(n,[e,t])}elu(e){if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,mC,e.dtype);const t=new st(e.shape,oC);return this.compileAndRun(t,[e])}eluDer(e,t){const n=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(GK,e.shape,t.shape):new hn(MK,e.shape,t.shape);return this.compileAndRun(n,[e,t])}selu(e){const t=new st(e.shape,R8);return this.compileAndRun(t,[e])}int(e){const t=new st(e.shape,e6);return this.compileAndRun(t,[e],"int32")}clip(e,t,n){let s;C().getBool("WEBGL_PACK_CLIP")?s=new n5(e.shape):s=new t5(e.shape);const i=s.getCustomSetupFunc(t,n);return this.compileAndRun(s,[e],null,i)}abs(e){if(this.shouldExecuteOnCPU([e])&&e.dtype!=="complex64"){const n=Aj(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,n)}if(C().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,sC,e.dtype);const t=new st(e.shape,sC);return this.compileAndRun(t,[e])}complexAbs(e){const t=this.texData.get(e.dataId),n=new s5(e.shape),s=[this.makeComplexComponentTensorInfo(e,t.complexTensors.real),this.makeComplexComponentTensorInfo(e,t.complexTensors.imag)];return this.compileAndRun(n,s)}sigmoid(e){const t=new st(e.shape,M8);return this.compileAndRun(t,[e])}softplus(e){const t=new st(e.shape,P8);return this.compileAndRun(t,[e])}asin(e){const t=new st(e.shape,z8);return this.compileAndRun(t,[e])}acos(e){const t=new st(e.shape,G8);return this.compileAndRun(t,[e])}atan(e){const t=new st(e.shape,V8);return this.compileAndRun(t,[e])}sinh(e){const t=new st(e.shape,Y8);return this.compileAndRun(t,[e])}cosh(e){const t=new st(e.shape,H8);return this.compileAndRun(t,[e])}tanh(e){const t=new st(e.shape,q8);return this.compileAndRun(t,[e])}asinh(e){const t=new st(e.shape,j8);return this.compileAndRun(t,[e])}acosh(e){const t=new st(e.shape,K8);return this.compileAndRun(t,[e])}atanh(e){const t=new st(e.shape,X8);return this.compileAndRun(t,[e])}erf(e){const t=new st(e.shape,J8);return this.compileAndRun(t,[e])}step(e,t){const n=new st(e.shape,O8(t));return this.compileAndRun(n,[e])}conv2dByMatMul(e,t,n,s,i,o){const a=e.shape,c=this.texData.get(e.dataId),h=n.inChannels,d=a[0]*a[1]*a[2],m=n.outChannels,y=n.dataFormat==="channelsLast",b=!1,w=!1,L=(d===1||m===1)&&h>gC,T=a[2]%2!==0&&!!c.isPacked;if(L||!C().getBool("WEBGL_LAZILY_UNPACK")||!C().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!T){const B=y?a[0]*a[1]*a[2]:a[0]*a[2]*a[3],$=K(e,[1,B,n.inChannels]),H=K(t,[1,n.inChannels,n.outChannels]),q=this.fusedBatchMatMul({a:$,b:H,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o});return K(q,n.outShape)}const 
A=y?a[0]*a[1]*(a[2]+1):a[0]*a[2]*(a[3]+1),N={dataId:e.dataId,shape:[1,A,n.inChannels],dtype:e.dtype},E=c.shape;c.shape=c.shape.slice(),c.shape[c.shape.length-2]++,k(Zp(c.shape,N.shape),()=>`packed reshape ${c.shape} to ${N.shape} isn't free`);const D=K(t,[1,n.inChannels,n.outChannels]),F=this.fusedBatchMatMul({a:N,b:D,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o}),_=this.texData.get(F.dataId);return k(_.isPacked,()=>"batchMatMul result is expected to be packed"),c.shape=E,_.shape=n.outShape,Fs().makeTensorFromDataId(F.dataId,n.outShape,F.dtype)}conv2dWithIm2Row(e,t,n,s,i,o){const{filterWidth:a,filterHeight:c,inChannels:h,outWidth:d,outHeight:m,dataFormat:y}=n,b=y==="channelsLast",w=a*c*h,L=m*d,T=[w,L],A=!0,N=!1,E=e.squeeze([0]),D=t.reshape([1,w,-1]),F=new q5(T,E.shape,n),_=this.compileAndRun(F,[E]).reshape([1,T[0],T[1]]),B=s!=null,$=o!=null,H=i?om(i,!0):null,q=new tS(_.shape,[1,L,n.outChannels],A,N,B,H,$),J=[_,D];s&&J.push(s),$&&J.push(o);const re=this.compileAndRun(q,J);return b?re.reshape([1,m,d,n.outChannels]):re.reshape([1,n.outChannels,m,d])}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n,s,i,o);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n,s,i,o);const a=s!=null,c=o!=null,h=i?om(i,!1):null,d=new $0(n,a,h,c),m=[e,t];return s&&m.push(s),o&&m.push(o),this.compileAndRun(d,m)}conv2d(e,t,n){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n);if(C().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n);const s=new $0(n);return this.compileAndRun(s,[e,t])}conv2dDerInput(e,t,n){const s=new a5(n);return this.compileAndRun(s,[e,t])}conv2dDerFilter(e,t,n){const s=new o5(n);return this.compileAndRun(s,[e,t])}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){const a=C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1,c=i?om(i,a):null,h=[e,t],d=s!=null,m=o!=null;d&&h.push(s),m&&h.push(o);let y;return a?(y=new B0(n,d,c,m),this.compileAndRun(y,h)):(y=new U0(n,d,c,m),this.compileAndRun(y,h))}depthwiseConv2D(e,t,n){let s;return C().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1?(s=new B0(n),this.compileAndRun(s,[e,t])):(s=new U0(n),this.compileAndRun(s,[e,t]))}depthwiseConv2DDerInput(e,t,n){const s=new u5(n);return this.compileAndRun(s,[e,t])}depthwiseConv2DDerFilter(e,t,n){const s=new h5(n);return this.compileAndRun(s,[e,t])}conv3d(e,t,n){const s=new d5(n);return this.compileAndRun(s,[e,t])}conv3dDerInput(e,t,n){const s=new l5(n);return this.compileAndRun(s,[e,t])}conv3dDerFilter(e,t,n){const s=new c5(n);return this.compileAndRun(s,[e,t])}cast(e,t){return DA(e,t,this)}unstack(e,t){const n=e.shape[t],s=new Array(e.rank-1);let i=0;for(let h=0;h1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=n==="NHWC"?e.shape[1]:e.shape[2],o=n==="NHWC"?e.shape[2]:e.shape[3],a=n==="NHWC"?e.shape[3]:e.shape[1],c=i*t,h=o*t,d=a/(t*t),m=n==="NHWC"?[s,c,h,d]:[s,d,c,h],y=new g5(m,t,n);return this.compileAndRun(y,[e])}split(e,t,n){return 
i6(e,t,n)}scatterND(e,t,n){const{sliceRank:s,numUpdates:i,sliceSize:o,strides:a,outputSize:c}=va(t,e,n),h=[c/o,o],d=e.reshape([i,s]),m=t.reshape([i,o]);if(c===0)return kA(en([]),n);const y=Ne(0),b=new Q0(i,s,d.rank,m.rank,a,h),w=this.compileAndRun(b,[m,d,y]);return w.reshape(n)}sparseToDense(e,t,n,s){const{sliceRank:i,numUpdates:o,strides:a,outputSize:c}=va(t,e,n),h=!1,d=new Q0(o,i,e.rank,t.rank,a,[c,1],h),m=this.compileAndRun(d,[t,e,s]);return m.reshape(n)}fft(e){const t=!1;return this.fftImpl(e,t)}ifft(e){const t=!0;return this.fftImpl(e,t)}fftImpl(e,t){const n=this.texData.get(e.dataId),s=new V0(G0.REAL,e.shape,t),i=new V0(G0.IMAG,e.shape,t),o=[this.makeComplexComponentTensorInfo(e,n.complexTensors.real),this.makeComplexComponentTensorInfo(e,n.complexTensors.imag)],a=this.compileAndRun(s,o),c=this.compileAndRun(i,o),h=this.complex(a,c).as2D(e.shape[0],e.shape[1]);return a.dispose(),c.dispose(),h}gatherND(e,t){const n=t.shape,s=n[n.length-1],[i,o,a,c]=gd(e,t),h=t.reshape([o,s]),d=e.reshape([e.size/a,a]),m=new A5(s,c,[o,a]),y=this.compileAndRun(m,[d,h]);return y.reshape(i)}fill(e,t,n){if(n=n||ba(t),n==="string"){const s=lo(n,we(e));return s.fill(t),Fs().makeTensor(s,e,n,this)}else{const s=new I5(e,t),i=s.getCustomSetupFunc(t);return this.compileAndRun(s,[],n,i)}}onesLike(e){if(e.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(e.shape,1,e.dtype)}zerosLike(e){return this.fill(e.shape,e.dtype==="string"?"":0,e.dtype)}linspace(e,t,n){return Ub(e,t,n)}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return Fs().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new s6(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new t8(e.shape),n=!0;return this.runWebGLProgram(t,[e],e.dtype,null,n)}packedReshape(e,t){const n=[ic(e.shape),...rc(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},i=[ic(t),...rc(t)],o=new Z0(i,n),a=!0,c=this.runWebGLProgram(o,[s],e.dtype,null,a);return{dataId:c.dataId,shape:t,dtype:c.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:i}=t,o=XL(s);let a;n?a=new f5(o):a=new m5(o);const c=!0,h=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:e}],i,null,c);return{dtype:i,shape:s,dataId:h.dataId}}runWebGLProgram(e,t,n,s,i=!1){const o=this.makeTensorInfo(e.outputShape,n),a=this.texData.get(o.dataId);if(e.packedOutput&&(a.isPacked=!0),e.outPackingScheme===qh.DENSE){const L=Kh(e.outputShape);a.texShape=L.map(T=>T*2)}if(e.outTexUsage!=null&&(a.usage=e.outTexUsage),we(o.shape)===0)return a.values=bn(o.dtype,0),o;const c=[],h=t.map(L=>{if(L.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let T=this.texData.get(L.dataId);if(T.texture==null){if(!e.packedInputs&&we(L.shape)<=C().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:L.shape,texData:null,isUniform:!0,uniformValues:T.values};e.packedInputs&&(T.isPacked=!0,T.shape=L.shape)}else if(!!T.isPacked!==!!e.packedInputs)L=T.isPacked?this.unpackTensor(L):this.packTensor(L),c.push(L),T=this.texData.get(L.dataId);else if(T.isPacked&&!Zp(T.shape,L.shape)){const A=L,N=L.shape;L.shape=T.shape,L=this.packedReshape(L,N),c.push(L),T=this.texData.get(L.dataId),A.shape=N}return this.uploadToGPU(L.dataId),{shape:L.shape,texData:T,isUniform:!1}});this.uploadToGPU(o.dataId);const d={shape:o.shape,texData:a,isUniform:!1},m=H5(e,h,d),y=this.getAndSaveBinary(m,()=>V5(this.gpgpu,e,h,d)),b=this.activeTimers!=null;let w;if(b&&(w=this.startTimer()),Y5(this.gpgpu,y,h,d,s),c.forEach(L=>this.disposeIntermediateTensorInfo(L)),b&&(w=this.endTimer(w),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(w)})),!C().getBool("WEBGL_LAZILY_UNPACK")&&a.isPacked&&i===!1){const L=this.unpackTensor(o);return this.disposeIntermediateTensorInfo(o),L}return o}compileAndRun(e,t,n,s,i=!1){n=n||t[0].dtype;const o=this.runWebGLProgram(e,t,n,s,i);return Fs().makeTensorFromDataId(o.dataId,o.shape,o.dtype)}getAndSaveBinary(e,t){return e in this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!C().getBool("IS_TEST")){const e=Object.keys(this.binaryCache);e.forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=ee(()=>{if(!C().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=C().getBool("DEBUG");C().set("DEBUG",!1);const t=this.abs(Ne(1e-8)).dataSync()[0];if(C().set("DEBUG",e),t>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?c6:l6}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:i,texture:o,usage:a,isPacked:c}=t;if(o!=null)return;const h=this.activeTimers!=null;let d;h&&(d=Vn());let m=t.texShape;if(m==null&&(m=yj(n,c),t.texShape=m),i!=null){const y=XL(n);let b,w=m[1],L=m[0];const T=i instanceof Uint8Array;c?([w,L]=sc(m[0],m[1]),b=new S5(y,[L,w],T)):b=new L5(y,[L,w],T);const A=this.makeTensorInfo([L,w],s);T?this.texData.get(A.dataId).usage=As.PIXELS:this.texData.get(A.dataId).usage=As.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(A.dataId),w,L,i);const N=!0,E=this.runWebGLProgram(b,[A],s,null,N),D=this.texData.get(E.dataId);t.texture=D.texture,t.texShape=D.texShape,t.isPacked=D.isPacked,t.usage=D.usage,this.disposeIntermediateTensorInfo(A),this.texData.delete(E.dataId),t.values=null,h&&(this.uploadWaitMs+=Vn()-d)}else{const y=this.acquireTexture(m,a,s,c);t.texture=y}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),t!=null&&(n.values=f6(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){const 
i=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${i} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*ry(t)}tryRunOnCpuOrThrow(e,t){if(this.shouldExecuteOnCPU(e))try{return t()}catch(n){if(C().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function f6(e,t){if(t==="float32"||t==="complex64")return e;if(t==="int32"||t==="bool"){const n=t==="int32"?new Int32Array(e.length):new Uint8Array(e.length);for(let s=0;snew m6,2);const bee={forceHalfFloat:y6},yC="if (isnan(x)) return x;",b6=` + `}}const{segment_util:_C}=cw,yX=lw,bX=hw,wX=uw,LX=Ap,SX=1e-7,IX=1e-4,_m={};function xX(e){return e in _m||(_m[e]={}),_m[e]}function Wm(e,t=!1){if(e==="linear")return t?mX:z6;if(e==="relu")return t?DC:TC;if(e==="elu")return t?FC:vC;if(e==="relu6")return t?kC:AC;if(e==="prelu")return t?iC:sC;throw new Error(`Activation ${e} has not been implemented for the WebGL backend.`)}const TX=128,AX=600;function vX(){return oe().global.screen==null?1024:oe().global.screen.height*oe().global.screen.width*window.devicePixelRatio*AX/1024/1024}const WC=1e3;class NX extends y{constructor(e){super();if(this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!oe().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(e==null){const t=ki(oe().getNumber("WEBGL_VERSION"));this.binaryCache=xX(oe().getNumber("WEBGL_VERSION")),this.gpgpu=new n6(t),this.canvas=t.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=e,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=e.gl.canvas;this.textureManager=new W6(this.gpgpu),this.numMBBeforeWarning=vX(),this.texData=new p(this,Ki())}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(e,t,n){if((oe().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||oe().getBool("DEBUG"))&&this.checkNumericalProblems(e),n==="complex64"&&e!=null)throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");const s={};return this.texData.set(s,{shape:t,dtype:n,values:e,usage:Ns.UPLOAD,refCount:1,complexParentRefCount:0}),s}incRef(e){const t=this.texData.get(e);t.refCount++}decRef(e){if(this.texData.has(e)){const t=this.texData.get(e);t.refCount--}}move(e,t,n,s){if(oe().getBool("DEBUG")&&this.checkNumericalProblems(t),s==="complex64")throw new Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");this.texData.set(e,{shape:n,dtype:s,values:t,usage:Ns.UPLOAD,refCount:1,complexParentRefCount:0})}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.texData.has(t)){const n=this.texData.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}readSync(e){const t=this.texData.get(e),{values:n,dtype:s,complexTensorInfos:i,slice:o,shape:a,isPacked:c}=t;if(o!=null){let f;c?f=new fu(a,Fm):f=new st(a,Fm);const b=this.runWebGLProgram(f,[{dataId:e,shape:a,dtype:s}],s),w=this.readSync(b.dataId);return this.disposeIntermediateTensorInfo(b),w}if(n!=null)return this.convertAndCacheOnCPU(e);if(s==="string")return n;const h=this.activeTimers!=null;let d;h&&(d=jn());let m;if(s==="complex64"){const f=this.readSync(i.real.dataId),b=this.readSync(i.imag.dataId);m=tr(f,b)}else m=this.getValuesFromTexture(e);return h&&(this.downloadWaitMs+=jn()-d),this.convertAndCacheOnCPU(e,m)}async read(e){if(this.pendingRead.has(e)){const w=this.pendingRead.get(e);return new Promise(L=>w.push(L))}const t=this.texData.get(e),{values:n,shape:s,slice:i,dtype:o,complexTensorInfos:a,isPacked:c}=t;if(i!=null){let w;c?w=new fu(s,Fm):w=new st(s,Fm);const L=this.runWebGLProgram(w,[{dataId:e,shape:s,dtype:o}],o),x=this.read(L.dataId);return this.disposeIntermediateTensorInfo(L),x}if(n!=null)return this.convertAndCacheOnCPU(e);if(!oe().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&oe().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let h=null,d;if(o!=="complex64"&&oe().get("WEBGL_BUFFER_SUPPORTED")){d=this.decode(e);const w=this.texData.get(d.dataId);h=this.gpgpu.createBufferFromTexture(w.texture,...uu(s))}this.pendingRead.set(e,[]),o!=="complex64"&&await this.gpgpu.createAndWaitForFence();let m;if(o==="complex64"){const w=await Promise.all([this.read(a.real.dataId),this.read(a.imag.dataId)]),L=w[0],x=w[1];m=tr(L,x)}else if(h==null)m=this.getValuesFromTexture(e);else{const w=P(s);m=this.gpgpu.downloadFloat32MatrixFromBuffer(h,w)}d!=null&&this.disposeIntermediateTensorInfo(d);const f=this.convertAndCacheOnCPU(e,m),b=this.pendingRead.get(e);return this.pendingRead.delete(e),b.forEach(w=>w(f)),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e),this.pendingDeletes--),f}checkNumericalProblems(e){if(e==null)return;for(let t=0;tc.query)).filter(c=>c!=null),o=te(this.activeTimers.map(c=>c.name)).filter(c=>c!=null);this.activeTimers=t,s&&(this.programTimersStack=null);const a={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const c=await Promise.all(i);a.kernelMs=C(c),a.getExtraProfileInfo=()=>c.map((h,d)=>({name:o[d],ms:h})).map(h=>`${h.name}: ${h.ms}`).join(", ")}else a.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,a}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:jn(),endMs:null}}endTimer(e){return oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=jn(),e)}async getQueryTime(e){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const 
t=e;return t.endMs-t.startMs}disposeData(e){if(this.pendingDisposal.has(e))return;if(this.pendingRead.has(e)){this.pendingDisposal.add(e),this.pendingDeletes++;return}if(!this.texData.has(e))return;if(this.texData.get(e).complexParentRefCount>0){this.texData.get(e).refCount--;return}this.releaseGPUData(e);const{complexTensorInfos:t}=this.texData.get(e);t!=null&&(this.texData.get(t.real.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(t.real),this.texData.get(t.imag.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(t.imag)),this.texData.delete(e)}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:i,isPacked:o,slice:a}=this.texData.get(e),c=a&&a.origDataId||e,h=this.dataRefCount.get(c);h>1?this.dataRefCount.set(c,h-1):(this.dataRefCount.delete(c),t!=null&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,i,o)));const d=this.texData.get(e);d.texture=null,d.texShape=null,d.isPacked=!1,d.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return oe().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=Ki().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=TX){const n=this.getCPUBackend();return!this.warnedAboutCPUBackend&&n==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),n!=null&&e.every(s=>this.texData.get(s.dataId).texture==null&&P(s.shape)this.cpuBackend.stridedSlice(e,t,n,s));if(i)return i;const o=jd(t,n,s);if(o.some(c=>c===0))return sn([],o);const a=new _6(t,s,o);return this.compileAndRun(a,[e])}reverse(e,t){const n=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new R6(e.shape,t):new C6(e.shape,t);return this.compileAndRun(n,[e])}neg(e){const t=this.tryRunOnCpuOrThrow([e],()=>this.cpuBackend.neg(e));if(t)return t;if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,NC,e.dtype);const n=new st(e.shape,NC);return this.compileAndRun(n,[e])}batchMatMul(e,t,n,s){const i=n?e.shape[2]:e.shape[1],o=s?t.shape[1]:t.shape[2],a=n?e.shape[1]:e.shape[2],c=Math.max(e.shape[0],t.shape[0]);if((i===1||o===1)&&a>WC){n&&(e=Ye(e,[0,2,1])),s&&(t=Ye(t,[0,2,1]));const m=o===1?e:e.as3D(c,a,1),f=o===1?2:1,b=o===1?t.as3D(c,1,a):t,w=X(m,b);return w.sum(f,!0)}const h=$n(e.dtype,t.dtype),d=new xS(e.shape,t.shape,[c,i,o],n,s);return this.compileAndRun(d,[e,t],h)}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){const c=n?e.shape[2]:e.shape[1],h=s?t.shape[1]:t.shape[2],d=Math.max(e.shape[0],t.shape[0]),m=$n(e.dtype,t.dtype),f=i!=null,b=a!=null,w=o?Wm(o,!0):null,L=new xS(e.shape,t.shape,[d,c,h],n,s,f,w,b),x=[e,t];return i&&x.push(i),a&&x.push(a),this.compileAndRun(L,x,m)}localResponseNormalization4D(e,t,n,s,i){const o=oe().getBool("WEBGL_PACK_NORMALIZATION")?new h6(e.shape,t,n,s,i):new c6(e.shape,t,n,s,i);return this.compileAndRun(o,[e])}LRNGrad(e,t,n,s,i,o,a){const c=new l6(t.shape,s,i,o,a);return this.compileAndRun(c,[t,n,e])}tile(e,t){if(e.dtype==="string"){const s=this.readSync(e.dataId),i=s.map(a=>Kl(a)),o=wt(e.shape,e.dtype,i);return bX(o,t)}const n=new M6(e.shape,t);return this.compileAndRun(n,[e])}pad(e,t,n){const s=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new S6(e.shape,t,n):new L6(e.shape,t,n);return 
this.compileAndRun(s,[e])}gather(e,t,n){const s=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.gather(e,t,n));if(s)return s;const i=new $8(e.shape,t.size,n);return this.compileAndRun(i,[e,t])}batchToSpaceND(e,t,n){A(e.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((d,m)=>d*m),i=Oh(e.shape,t,s),o=Eh(i.length,t.length),a=Dh(e.shape,t,s),c=Zb(n,t.length),h=Qb(a,n,t.length);return Ye(e.reshape(i),o).reshape(a).slice(c,h)}spaceToBatchND(e,t,n){A(e.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((m,f)=>m*f),i=[[0,0]];i.push(...n);for(let m=1+t.length;mthis.cpuBackend.prod(e,t));if(n)return n;const[s,i]=An(e.shape,t),o=P(i),a=e.as2D(-1,o),c=Ud(e.dtype);return this.reduce(a,"prod",c).reshape(s)}unsortedSegmentSum(e,t,n){let s=0;const i=Xn([s],e.rank);let o=e;i!=null&&(o=Ye(e,i),s=as(1,e.rank)[0]);const a=_C.computeOutShape(o.shape,s,n),c=P([o.shape[s]]),h=o.as2D(-1,c),d=Ud(e.dtype);let m=this.segOpCompute(h,"unsortedSegmentSum",t,d,n).reshape(a);return i!=null&&(m=Ye(m,sh(i))),m}segOpCompute(e,t,n,s,i){const o=e.shape[0],a=e.shape[1],c=_C.segOpComputeOptimalWindowSize(a,i),h={windowSize:c,inSize:a,batchSize:o,numSegments:i},d=new O6(h,t),m=this.compileAndRun(d,[e,n],s);return m.shape[1]===i?m:(n=bh(0,i).tile([a/c]),this.segOpCompute(m,t,n,s,i))}argMinMaxReduce(e,t,n){const s=[t];if(Kn("arg"+n.charAt(0).toUpperCase()+n.slice(1),s,e.rank),!oe().getBool("WEBGL_PACK_REDUCE")||e.rank<=2){const[i,o]=An(e.shape,s),a=P(o),c=e.as2D(-1,a);return this.argReduce(c,n).reshape(i)}return this.argReducePacked(e,n)}argMin(e,t){return this.argMinMaxReduce(e,t,"min")}argMax(e,t){return this.argMinMaxReduce(e,t,"max")}cumsum(e,t,n,s){if(t!==e.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=e.shape[t];let o=e;for(let a=0;a<=Math.ceil(Math.log2(i))-1;a++){const c=new cC(e.shape,!1,s),h=c.getCustomSetupFunc(a),d=o;o=this.compileAndRun(c,[o],o.dtype,h),d.dispose()}if(n){const a=new cC(e.shape,n,s),c=o;o=this.compileAndRun(a,[o]),c.dispose()}return o}equal(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,a8,"bool");const n=new _n(q5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}less(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.less(e,t));if(n)return n;if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,c8,"bool");const s=new _n(j5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}lessEqual(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,l8,"bool");const n=new _n(K5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}greater(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.greater(e,t));if(n)return n;if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,h8,"bool");const s=new _n(X5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}greaterEqual(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,u8,"bool");const n=new _n(J5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalNot(e){const t=new st(e.shape,pX);return this.compileAndRun(t,[e])}logicalAnd(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,d8,"bool");const n=new _n(Z5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalOr(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return 
this.packedBinaryOp(e,t,p8,"bool");const n=new _n(Q5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}select(e,t,n){const s=new E6(e.rank,t.shape,t.rank);return this.compileAndRun(s,[e,t,n],$n(t.dtype,n.dtype))}where(e){Za("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");const t=e.dataSync();return LX(e.shape,t)}topk(e,t,n){const s=e.dataSync();return wX(s,e.shape,e.dtype,t,n)}min(e,t){Kn("min",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"min",o.dtype).reshape(n)}minimum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.minimum(e,t));if(n)return n;const s=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(f8,e.shape,t.shape):new _n(t8,e.shape,t.shape);return this.compileAndRun(s,[e,t])}mod(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(g8,e.shape,t.shape):new _n(n8,e.shape,t.shape);return this.compileAndRun(n,[e,t])}maximum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.maximum(e,t));if(n)return n;const s=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(m8,e.shape,t.shape):new _n(e8,e.shape,t.shape);return this.compileAndRun(s,[e,t])}all(e,t){Kn("all",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"all",o.dtype).reshape(n)}any(e,t){Kn("any",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"any",o.dtype).reshape(n)}floorDiv(e,t){const n=Y5,s="int32";if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,i8,s);const i=new _n(n,e.shape,t.shape);return this.compileAndRun(i,[e,t],s)}packedUnaryOp(e,t,n){const s=new fu(e.shape,t);return this.compileAndRun(s,[e],n)}packedBinaryOp(e,t,n,s,i=!1){const o=new lr(n,e.shape,t.shape,i);return this.compileAndRun(o,[e,t],s)}makeComplexComponentTensorInfo(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}addN(e){if(e.length===1)return e[0];if(e.length>oe().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const o=Math.floor(e.length/2),a=this.addN(e.slice(0,o)),c=this.addN(e.slice(o));return this.addN([a,c])}const t=e.map(o=>o.dtype).reduce((o,a)=>$n(o,a)),n=e.map(o=>o.shape),s=oe().getBool("WEBGL_PACK"),i=s?new s5(e[0].shape,n):new n5(e[0].shape,n);return this.compileAndRun(i,e,t)}pow(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"),s=n?new lr(r8,e.shape,t.shape):new _n(H5,e.shape,t.shape),i=$n(e.dtype,t.dtype);return this.compileAndRun(s,[e,t],i)}ceil(e){if(this.shouldExecuteOnCPU([e])){const n=YK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,CC,e.dtype);const t=new st(e.shape,CC);return this.compileAndRun(t,[e])}floor(e){if(this.shouldExecuteOnCPU([e])){const n=jK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,RC,e.dtype);const t=new st(e.shape,RC);return this.compileAndRun(t,[e])}sign(e){const t=new st(e.shape,Y6);return this.compileAndRun(t,[e])}isNaN(e){const t=new st(e.shape,H6);return this.compileAndRun(t,[e],"bool")}isInf(e){const t=new st(e.shape,q6);return this.compileAndRun(t,[e],"bool")}isFinite(e){const t=new st(e.shape,j6);return this.compileAndRun(t,[e],"bool")}round(e){const t=new st(e.shape,K6);return this.compileAndRun(t,[e])}exp(e){if(this.shouldExecuteOnCPU([e])){const n=HK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return 
this.packedUnaryOp(e,OC,e.dtype);const t=new st(e.shape,OC);return this.compileAndRun(t,[e])}expm1(e){if(this.shouldExecuteOnCPU([e])){const n=qK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,EC,e.dtype);const t=new st(e.shape,EC);return this.compileAndRun(t,[e])}softmax(e,t){const n=qe([t],e.shape),s=Qn(e,n),i=vn(s.shape,n),o=Re(e,s.reshape(i)),a=this.exp(o),c=this.sum(a,n).reshape(i);return We(a,c)}log(e){if(this.shouldExecuteOnCPU([e])){const n=KK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,fX,e.dtype);const t=new st(e.shape,X6);return this.compileAndRun(t,[e])}log1p(e){const t=new st(e.shape,J6);return this.compileAndRun(t,[e])}sqrt(e){const t=new st(e.shape,Z6);return this.compileAndRun(t,[e])}rsqrt(e){if(this.shouldExecuteOnCPU([e])){const n=ZK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}const t=new st(e.shape,Q6);return this.compileAndRun(t,[e])}reciprocal(e){const t=new st(e.shape,dX);return this.compileAndRun(t,[e])}relu(e){let t;return oe().getBool("WEBGL_PACK")?t=new fu(e.shape,DC):t=new st(e.shape,TC),this.compileAndRun(t,[e])}relu6(e){let t;return oe().getBool("WEBGL_PACK")?t=new fu(e.shape,kC):t=new st(e.shape,AC),this.compileAndRun(t,[e])}prelu(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(iC,e.shape,t.shape):new _n(sC,e.shape,t.shape);return this.compileAndRun(n,[e,t])}elu(e){if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,FC,e.dtype);const t=new st(e.shape,vC);return this.compileAndRun(t,[e])}eluDer(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(o8,e.shape,t.shape):new _n(s8,e.shape,t.shape);return this.compileAndRun(n,[e,t])}selu(e){const t=new st(e.shape,V6);return this.compileAndRun(t,[e])}clip(e,t,n){let s;oe().getBool("WEBGL_PACK_CLIP")?s=new b8(e.shape):s=new y8(e.shape);const i=s.getCustomSetupFunc(t,n);return this.compileAndRun(s,[e],null,i)}abs(e){if(this.shouldExecuteOnCPU([e])&&e.dtype!=="complex64"){const n=VK(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,xC,e.dtype);const t=new st(e.shape,xC);return this.compileAndRun(t,[e])}complexAbs(e){const t=this.texData.get(e.dataId),n=new w8(e.shape),s=[this.makeComplexComponentTensorInfo(e,t.complexTensorInfos.real),this.makeComplexComponentTensorInfo(e,t.complexTensorInfos.imag)];return this.compileAndRun(n,s)}sigmoid(e){const t=new st(e.shape,eX);return this.compileAndRun(t,[e])}softplus(e){const t=new st(e.shape,tX);return this.compileAndRun(t,[e])}asin(e){const t=new st(e.shape,nX);return this.compileAndRun(t,[e])}acos(e){const t=new st(e.shape,sX);return this.compileAndRun(t,[e])}atan(e){const t=new st(e.shape,iX);return this.compileAndRun(t,[e])}sinh(e){const t=new st(e.shape,rX);return this.compileAndRun(t,[e])}cosh(e){const t=new st(e.shape,oX);return this.compileAndRun(t,[e])}tanh(e){const t=new st(e.shape,aX);return this.compileAndRun(t,[e])}asinh(e){const t=new st(e.shape,cX);return this.compileAndRun(t,[e])}acosh(e){const t=new st(e.shape,lX);return this.compileAndRun(t,[e])}atanh(e){const t=new st(e.shape,hX);return this.compileAndRun(t,[e])}erf(e){const t=new st(e.shape,uX);return this.compileAndRun(t,[e])}step(e,t){const n=new st(e.shape,G6(t));return 
this.compileAndRun(n,[e])}conv2dByMatMul(e,t,n,s,i,o){const a=e.shape,c=this.texData.get(e.dataId),h=n.inChannels,d=a[0]*a[1]*a[2],m=n.outChannels,f=n.dataFormat==="channelsLast",b=!1,w=!1,L=(d===1||m===1)&&h>WC,x=a[2]%2!==0&&!!c.isPacked;if(L||!oe().getBool("WEBGL_LAZILY_UNPACK")||!oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!x){const U=f?a[0]*a[1]*a[2]:a[0]*a[2]*a[3],$=K(e,[1,U,n.inChannels]),Y=K(t,[1,n.inChannels,n.outChannels]),j=this.fusedBatchMatMul({a:$,b:Y,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o});return K(j,n.outShape)}const v=f?a[0]*a[1]*(a[2]+1):a[0]*a[2]*(a[3]+1),N={dataId:e.dataId,shape:[1,v,n.inChannels],dtype:e.dtype},O=c.shape;c.shape=c.shape.slice(),c.shape[c.shape.length-2]++,A(Rm(c.shape,N.shape),()=>`packed reshape ${c.shape} to ${N.shape} isn't free`);const E=K(t,[1,n.inChannels,n.outChannels]),k=this.fusedBatchMatMul({a:N,b:E,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o}),F=this.texData.get(k.dataId);return A(F.isPacked,()=>"batchMatMul result is expected to be packed"),c.shape=O,F.shape=n.outShape,Ki().makeTensorFromDataId(k.dataId,n.outShape,k.dtype)}conv2dWithIm2Row(e,t,n,s,i,o){const{filterWidth:a,filterHeight:c,inChannels:h,outWidth:d,outHeight:m,dataFormat:f}=n,b=f==="channelsLast",w=a*c*h,L=m*d,x=[w,L],v=!0,N=!1,O=e.squeeze([0]),E=t.reshape([1,w,-1]),k=new a6(x,O.shape,n),F=this.compileAndRun(k,[O]).reshape([1,x[0],x[1]]),U=s!=null,$=o!=null,Y=i?Wm(i,!0):null,j=new xS(F.shape,E.shape,[1,L,n.outChannels],v,N,U,Y,$),Z=[F,E];s&&Z.push(s),$&&Z.push(o);const ie=this.compileAndRun(j,Z);return b?ie.reshape([1,m,d,n.outChannels]):ie.reshape([1,n.outChannels,m,d])}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n,s,i,o);if(oe().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n,s,i,o);const a=s!=null,c=o!=null,h=i?Wm(i,!1):null,d=new rC(n,a,h,c),m=[e,t];return s&&m.push(s),o&&m.push(o),this.compileAndRun(d,m)}conv2d(e,t,n){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n);if(oe().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n);const s=new rC(n);return this.compileAndRun(s,[e,t])}conv2dDerInput(e,t,n){const s=new S8(n);return this.compileAndRun(s,[e,t])}conv2dDerFilter(e,t,n){const s=new L8(n);return this.compileAndRun(s,[e,t])}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){const a=oe().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1,c=i?Wm(i,a):null,h=[e,t],d=s!=null,m=o!=null;d&&h.push(s),m&&h.push(o);let f;return a?(f=new aC(n,d,c,m),this.compileAndRun(f,h)):(f=new oC(n,d,c,m),this.compileAndRun(f,h))}depthwiseConv2D(e,t,n){let s;return oe().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1?(s=new aC(n),this.compileAndRun(s,[e,t])):(s=new oC(n),this.compileAndRun(s,[e,t]))}depthwiseConv2DDerInput(e,t,n){const s=new A8(n);return this.compileAndRun(s,[e,t])}depthwiseConv2DDerFilter(e,t,n){const s=new T8(n);return this.compileAndRun(s,[e,t])}conv3d(e,t,n){const s=new v8(n);return 
this.compileAndRun(s,[e,t])}conv3dDerInput(e,t,n){const s=new x8(n);return this.compileAndRun(s,[e,t])}conv3dDerFilter(e,t,n){const s=new I8(n);return this.compileAndRun(s,[e,t])}unstack(e,t){const n=e.shape[t],s=new Array(e.rank-1);let i=0;for(let h=0;h1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=n==="NHWC"?e.shape[1]:e.shape[2],o=n==="NHWC"?e.shape[2]:e.shape[3],a=n==="NHWC"?e.shape[3]:e.shape[1],c=i*t,h=o*t,d=a/(t*t),m=n==="NHWC"?[s,c,h,d]:[s,d,c,h],f=new O8(m,t,n);return this.compileAndRun(f,[e])}split(e,t,n){return yX(e,t,n)}scatterND(e,t,n){const{sliceRank:s,numUpdates:i,sliceSize:o,strides:a,outputSize:c}=_a(t,e,n),h=[c/o,o],d=e.reshape([i,s]),m=t.reshape([i,o]);if(c===0)return JA(sn([]),n);const f=Ce(0),b=new wC(i,s,d.rank,m.rank,a,h),w=this.compileAndRun(b,[m,d,f]);return w.reshape(n)}sparseToDense(e,t,n,s){const{sliceRank:i,numUpdates:o,strides:a,outputSize:c}=_a(t,e,n),h=!1,d=new wC(o,i,e.rank,t.rank,a,[c,1],h),m=this.compileAndRun(d,[t,e,s]);return m.reshape(n)}gatherND(e,t){const n=t.shape,s=n[n.length-1],[i,o,a,c]=Hd(e,t),h=t.reshape([o,s]),d=e.reshape([e.size/a,a]),m=new B8(s,c,[o,a]),f=this.compileAndRun(m,[d,h]);return f.reshape(i)}fill(e,t,n){if(n=n||wa(t),n==="string"){const s=ws(n,P(e));return s.fill(t),Ki().makeTensor(s,e,n,this)}else{const s=new W8(e,t),i=s.getCustomSetupFunc(t);return this.compileAndRun(s,[],n,i)}}onesLike(e){if(e.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(e.shape,1,e.dtype)}zerosLike(e){return this.fill(e.shape,e.dtype==="string"?"":0,e.dtype)}linspace(e,t,n){return aw(e,t,n)}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return Ki().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new gX(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new f6(e.shape),n=!0;return this.runWebGLProgram(t,[e],e.dtype,null,n)}packedReshape(e,t){const n=[mc(e.shape),...fc(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},i=[mc(t),...fc(t)],o=new bC(i,n),a=!0,c=this.runWebGLProgram(o,[s],e.dtype,null,a);return{dataId:c.dataId,shape:t,dtype:c.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:i}=t,o=wS(s);let a;n?a=new R8(o):a=new C8(o);const c=!0,h=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:e}],i,null,c);return{dtype:i,shape:s,dataId:h.dataId}}runWebGLProgram(e,t,n,s,i=!1){const o=this.makeTensorInfo(e.outputShape,n),a=this.texData.get(o.dataId);if(e.packedOutput&&(a.isPacked=!0),e.outPackingScheme===lu.DENSE){const L=uu(e.outputShape);a.texShape=L.map(x=>x*2)}if(e.outTexUsage!=null&&(a.usage=e.outTexUsage),P(o.shape)===0)return a.values=bt(o.dtype,0),o;const c=[],h=t.map(L=>{if(L.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let x=this.texData.get(L.dataId);if(x.texture==null){if(!e.packedInputs&&P(L.shape)<=oe().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:L.shape,texData:null,isUniform:!0,uniformValues:x.values};e.packedInputs&&(x.isPacked=!0,x.shape=L.shape)}else if(!!x.isPacked!==!!e.packedInputs)L=x.isPacked?this.unpackTensor(L):this.packTensor(L),c.push(L),x=this.texData.get(L.dataId);else if(x.isPacked&&!Rm(x.shape,L.shape)){const v=L,N=L.shape;L.shape=x.shape,L=this.packedReshape(L,N),c.push(L),x=this.texData.get(L.dataId),v.shape=N}return this.uploadToGPU(L.dataId),{shape:L.shape,texData:x,isUniform:!1}});this.uploadToGPU(o.dataId);const d={shape:o.shape,texData:a,isUniform:!1},m=o6(e,h,d),f=this.getAndSaveBinary(m,()=>i6(this.gpgpu,e,h,d)),b=this.activeTimers!=null;let w;if(b&&(w=this.startTimer()),r6(this.gpgpu,f,h,d,s),c.forEach(L=>this.disposeIntermediateTensorInfo(L)),b&&(w=this.endTimer(w),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(w)})),!oe().getBool("WEBGL_LAZILY_UNPACK")&&a.isPacked&&i===!1){const L=this.unpackTensor(o);return this.disposeIntermediateTensorInfo(o),L}return o}compileAndRun(e,t,n,s,i=!1){n=n||t[0].dtype;const o=this.runWebGLProgram(e,t,n,s,i);return Ki().makeTensorFromDataId(o.dataId,o.shape,o.dtype)}getAndSaveBinary(e,t){return e in this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!oe().getBool("IS_TEST")){const e=Object.keys(this.binaryCache);e.forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=Q(()=>{if(!oe().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=oe().getBool("DEBUG");oe().set("DEBUG",!1);const t=this.abs(Ce(1e-8)).dataSync()[0];if(oe().set("DEBUG",e),t>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?SX:IX}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:i,texture:o,usage:a,isPacked:c}=t;if(o!=null)return;const h=this.activeTimers!=null;let d;h&&(d=jn());let m=t.texShape;if(m==null&&(m=_K(n,c),t.texShape=m),i!=null){const f=wS(n);let b,w=m[1],L=m[0];const x=i instanceof Uint8Array;c?([w,L]=pc(m[0],m[1]),b=new _8(f,[L,w],x)):b=new F8(f,[L,w],x);const v=this.makeTensorInfo([L,w],s);x?this.texData.get(v.dataId).usage=Ns.PIXELS:this.texData.get(v.dataId).usage=Ns.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(v.dataId),w,L,i);const N=!0,O=this.runWebGLProgram(b,[v],s,null,N),E=this.texData.get(O.dataId);t.texture=E.texture,t.texShape=E.texShape,t.isPacked=E.isPacked,t.usage=E.usage,this.disposeIntermediateTensorInfo(v),this.texData.delete(O.dataId),t.values=null,h&&(this.uploadWaitMs+=jn()-d)}else{const f=this.acquireTexture(m,a,s,c);t.texture=f}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),t!=null&&(n.values=CX(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){const 
i=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${i} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*Bg(t)}tryRunOnCpuOrThrow(e,t){if(this.shouldExecuteOnCPU(e))try{return t()}catch(n){if(oe().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function CX(e,t){if(t==="float32"||t==="complex64")return e;if(t==="int32"||t==="bool"){const n=t==="int32"?new Int32Array(e.length):new Uint8Array(e.length);for(let s=0;snew NX,2);const one={forceHalfFloat:OX};function ur(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const EX={kernelName:xl,backendName:"webgl",kernelFunc:ur};function Lc(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.makeTensorInfo(s.shape,"complex64"),a=n.texData.get(o.dataId),c=ur({inputs:{x:s},backend:n}),h=n.texData.get(c.dataId);h.complexParentRefCount++;const d=ur({inputs:{x:i},backend:n}),m=n.texData.get(d.dataId);return m.complexParentRefCount++,a.complexTensorInfos={real:c,imag:d},o}const DX={kernelName:rd,backendName:"webgl",kernelFunc:Lc};const $C="if (isnan(x)) return x;",kX=` if (isnan(a)) return a; if (isnan(b)) return b; -`,w6=` +`,FX=` result.r = isNaN.r > 0. ? NAN : result.r; result.g = isNaN.g > 0. ? NAN : result.g; result.b = isNaN.b > 0. ? NAN : result.b; result.a = isNaN.a > 0. ? NAN : result.a; -`;function am(e){return({inputs:t,backend:n})=>{const{x:s}=t,i=n,o=new st(s.shape,e);return i.runWebGLProgram(o,[s],s.dtype)}}function iS(e,t,n,s){return({inputs:i,backend:o})=>{const{a,b:c}=i,h=o,d=C().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new qr(t,a.shape,c.shape,!!n):new hn(e,a.shape,c.shape),m=s||a.dtype,y=h.runWebGLProgram(d,[a,c],m);return y}}const L6=b6+` +`;function $m(e){return({inputs:t,backend:n})=>{const{x:s}=t,i=n,o=new st(s.shape,e);return i.runWebGLProgram(o,[s],s.dtype)}}function Sc({opSnippet:e,packedOpSnippet:t,checkOutOfBounds:n=!1,supportsComplex:s=!1,cpuKernelImpl:i,dtype:o}){return({inputs:a,backend:c})=>{const{a:h,b:d}=a,m=c;if(s&&h.dtype==="complex64"){const L=m.texData.get(h.dataId),x=m.texData.get(d.dataId),[v,N]=[[L.complexTensorInfos.real,x.complexTensorInfos.real],[L.complexTensorInfos.imag,x.complexTensorInfos.imag]].map(E=>{const[k,F]=E,U={dataId:k.dataId,dtype:k.dtype,shape:h.shape},$={dataId:F.dataId,dtype:F.dtype,shape:d.shape},Y=new _n(e,h.shape,d.shape);return m.runWebGLProgram(Y,[U,$],$n(k.dtype,F.dtype))}),O=Lc({inputs:{real:v,imag:N},backend:m});return m.disposeIntermediateTensorInfo(v),m.disposeIntermediateTensorInfo(N),O}const f=o||$n(h.dtype,d.dtype);if(m.shouldExecuteOnCPU([h,d])&&i!=null){const L=m.texData.get(h.dataId),x=m.texData.get(d.dataId),[v,N]=i(h.shape,d.shape,L.values,x.values,f),O=m.makeTensorInfo(N,f),E=m.texData.get(O.dataId);return E.values=v,O}const b=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")&&t!=null;let w;return b?w=new lr(t,h.shape,d.shape,n):w=new _n(e,h.shape,d.shape),m.runWebGLProgram(w,[h,d],f)}}const UC="return a + b;",_X=Sc({opSnippet:UC,packedOpSnippet:UC,supportsComplex:!0,cpuKernelImpl:GK}),WX={kernelName:wo,backendName:"webgl",kernelFunc:_X};const $X=kX+` return atan(a, b); -`,S6=` +`,UX=` vec4 result = atan(a, b); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+w6+` + `+FX+` return result; -`,I6=iS(L6,S6),x6={kernelName:Li,backendName:"webgl",kernelFunc:I6};function rS(e){const{inputs:t,backend:n}=e,{x:s}=t;return 
n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const T6={kernelName:al,backendName:"webgl",kernelFunc:rS};function A6(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;Xh(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;k(rn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Fn(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return rS({inputs:{x:i},backend:n});const y=new Zh(m,"avg",!1);return n.runWebGLProgram(y,[i],"float32")}const v6={kernelName:Xs,backendName:"webgl",kernelFunc:A6};function N6(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;Xh([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Fn(a.shape,c,h,1,d),y=new AK(m);return n.runWebGLProgram(y,[i],a.dtype)}const C6={kernelName:ua,backendName:"webgl",kernelFunc:N6};class R6{constructor(e,t,n,s,i,o){this.outputShape=[],this.variableNames=["x","mean","variance"],tt(e,t),tt(e,n);let a="0.0";s!=null&&(tt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="1.0";i!=null&&(tt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` +`,BX=Sc({opSnippet:$X,packedOpSnippet:UX}),MX={kernelName:nd,backendName:"webgl",kernelFunc:BX};function PX(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;du(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;A(cn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))return ur({inputs:{x:i},backend:n});const f=new mu(m,"avg",!1);return n.runWebGLProgram(f,[i],"float32")}const zX={kernelName:dl,backendName:"webgl",kernelFunc:PX};function VX(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;du([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Un(a.shape,c,h,1,d),f=new V5(m);return n.runWebGLProgram(f,[i],a.dtype)}const GX={kernelName:sd,backendName:"webgl",kernelFunc:VX};class YX{constructor(e,t,n,s,i,o){this.outputShape=[],this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="0.0";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="1.0";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` void main() { float x = getXAtOutCoords(); float mean = getMeanAtOutCoords(); @@ -3666,7 +3576,7 @@ return (round(mod(b, 2.0)) != 1) ? float inv = scale * inversesqrt(variance + float(${o})); setOutput(dot(vec3(x, -mean, offset), vec3(inv, inv, 1))); } - `}}class O6{constructor(e,t,n,s,i,o){this.packedInputs=!0,this.packedOutput=!0,this.variableNames=["x","mean","variance"],tt(e,t),tt(e,n);let a="vec4(0.0)";s!=null&&(tt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="vec4(1.0)";i!=null&&(tt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` + `}}class HX{constructor(e,t,n,s,i,o){this.packedInputs=!0,this.packedOutput=!0,this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="vec4(0.0)";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="vec4(1.0)";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` void main() { vec4 offset = ${a}; vec4 scale = ${c}; @@ -3679,13 +3589,59 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput((x - mean) * inv + offset); } - `}}const E6=({inputs:e,backend:t,attrs:n})=>{const{x:s,mean:i,variance:o,offset:a,scale:c}=e;k(i.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),k(a==null||i.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),k(c==null||i.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:h}=n;h==null&&(h=.001);const d=[s,i,o];let m=null;a!=null&&(m=a.shape,d.push(a));let y=null;c!=null&&(y=c.shape,d.push(c));const b=C().getBool("WEBGL_PACK_NORMALIZATION")?new O6(s.shape,i.shape,o.shape,m,y,h):new R6(s.shape,i.shape,o.shape,m,y,h),w=t.runWebGLProgram(b,d,d[0].dtype);return w},D6={kernelName:ol,backendName:"webgl",kernelFunc:E6};const k6=yC+` + `}}const qX=({inputs:e,backend:t,attrs:n})=>{const{x:s,mean:i,variance:o,offset:a,scale:c}=e;A(i.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(a==null||i.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(c==null||i.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:h}=n;h==null&&(h=.001);const d=[s,i,o];let m=null;a!=null&&(m=a.shape,d.push(a));let f=null;c!=null&&(f=c.shape,d.push(c));const b=oe().getBool("WEBGL_PACK_NORMALIZATION")?new HX(s.shape,i.shape,o.shape,m,f,h):new YX(s.shape,i.shape,o.shape,m,f,h),w=t.runWebGLProgram(b,d,d[0].dtype);return w},jX={kernelName:Il,backendName:"webgl",kernelFunc:qX};const KX="return float(a != b);",BC=Sc({opSnippet:KX,dtype:"bool"}),XX={kernelName:Dl,backendName:"webgl",kernelFunc:BC};function vS(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.texData.get(s.dataId);return ur({inputs:{x:i.complexTensorInfos.real},backend:n})}const JX={kernelName:Td,backendName:"webgl",kernelFunc:vS};const ZX="return float(int(x));";function QX(e,t){const n=new st(e.shape,ZX),s=t.runWebGLProgram(n,[e],"int32");return{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}function NS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return ur({inputs:{x:i},backend:n});const a=dt(i.shape),c=NS({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),h=Lc({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),h}if(i.dtype==="complex64"){const a=vS({inputs:{input:i},backend:n}),c=NS({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!ba(i.dtype,o)){const a=ur({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32")return QX(i,n);if(o==="bool"){const a=n.makeTensorInfo([],"bool",bt("bool",1)),c={a:i,b:a},h=BC({inputs:c,backend:n});return n.disposeIntermediateTensorInfo(a),h}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const e7={kernelName:Sa,backendName:"webgl",kernelFunc:NS};class t7{constructor(e){this.outputShape=[],this.outputShape=Xi(e,1),this.variableNames=e.map((o,a)=>`T${a}`);const t=new Array(e.length-1);t[0]=e[0][1];for(let o=1;o`T${x}`);const c=new Array(e.length-1);c[0]=e[0][t];for(let L=1;L= ${c[L-1]}) { + return getChannel( + getT${L}(${Um(a,h,x)}), + vec2(${Um(d,h,x)})); + }`}const b=c.length,w=c[c.length-1];f+=` + return getChannel( + getT${b}(${Um(a,h,w)}), + vec2(${Um(d,h,w)}));`,this.userCode=` + float 
getValue(${a.map(L=>"int "+L)}) { + ${f} + } + + void main() { + ${i} coords = getOutputCoords(); + vec4 result = vec4(getValue(${o}), 0., 0., 0.); + + ${o[s-1]} = ${o[s-1]} + 1; + if (${o[s-1]} < ${n[s-1]}) { + result.g = getValue(${o}); + } + + ${o[s-2]} = ${o[s-2]} + 1; + if (${o[s-2]} < ${n[s-2]}) { + result.a = getValue(${o}); + } + + ${o[s-1]} = ${o[s-1]} - 1; + if (${o[s-2]} < ${n[s-2]} && + ${o[s-1]} < ${n[s-1]}) { + result.b = getValue(${o}); + } + setOutput(result); + } + `}}function Um(e,t,n){const s=e.indexOf(t),i=e.map((o,a)=>a===s?`${o} - ${n}`:o);return i.join()}function MC(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.texData.get(s.dataId);return ur({inputs:{x:i.complexTensorInfos.imag},backend:n})}const s7={kernelName:gd,backendName:"webgl",kernelFunc:MC};function i7(e,t,n){const s=[mc(e.shape),...fc(e.shape)],i={dtype:e.dtype,shape:s,dataId:e.dataId},o=[mc(t),...fc(t)],a=new bC(o,s),c=!0,h=n.runWebGLProgram(a,[i],e.dtype,null,c);return{dataId:h.dataId,shape:t,dtype:h.dtype}}function dr(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{shape:o}=s,a=n,c=P(i.shape),h=Vt(o,c),d=P(h);A(c===d,()=>`The new shape (${h}) has ${d} elements and the old shape (${i.shape}) has ${c} elements. The new shape and old shape must have the same number of elements.`);const m=a.texData.get(i.dataId);return m.isPacked&&!Rm(i.shape,h)&&!(m.texture!==null&&Rm(m.shape,h))?i7(i,h,a):(a.incRef(i.dataId),{dataId:i.dataId,shape:h,dtype:i.dtype})}const r7={kernelName:_l,backendName:"webgl",kernelFunc:dr};function Ic(e,t,n){const s=e[0].dtype;if(s==="complex64"){const d=e.map(L=>vS({inputs:{input:L},backend:n})),m=e.map(L=>MC({inputs:{input:L},backend:n})),f=Ic(d,t,n),b=Ic(m,t,n),w=Lc({inputs:{real:f,imag:b},backend:n});return d.forEach(L=>n.disposeIntermediateTensorInfo(L)),m.forEach(L=>n.disposeIntermediateTensorInfo(L)),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(b),w}if(e.length>oe().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const d=Math.floor(e.length/2),m=Ic(e.slice(0,d),t,n),f=Ic(e.slice(d),t,n),b=Ic([m,f],t,n);return n.disposeIntermediateTensorInfo(m),n.disposeIntermediateTensorInfo(f),b}if(oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].shape.length>1){const d=new n7(e.map(m=>m.shape),t);return n.runWebGLProgram(d,e,s)}const i=Xi(e.map(d=>d.shape),t),o=e.map(d=>dr({inputs:{x:d},attrs:{shape:[-1,P(d.shape.slice(t))]},backend:n})),a=new t7(o.map(d=>d.shape)),c=n.runWebGLProgram(a,o,s);o.forEach(d=>n.disposeIntermediateTensorInfo(d));const h=dr({inputs:{x:c},attrs:{shape:i},backend:n});return n.disposeIntermediateTensorInfo(c),h}function o7(e){const{inputs:t,backend:n,attrs:s}=e,{axis:i}=s,o=qe(i,t[0].shape)[0],a=Xi(t.map(d=>d.shape),o);if(P(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(d=>P(d.shape)>0);if(c.length===1)return c[0];const h=c.map(d=>d.shape);return np(h,o),Ic(c,o,n)}const a7={kernelName:fl,backendName:"webgl",kernelFunc:o7};const c7=$C+` return cos(x); -`,F6=am(k6),_6={kernelName:da,backendName:"webgl",kernelFunc:F6};const W6=` +`,l7=$m(c7),h7={kernelName:Ia,backendName:"webgl",kernelFunc:l7};const u7=` if (a == b) { return 1.0; }; -return a / b;`,$6=` +return a / b;`,d7=` // vec4 one = vec4(equal(a, b)); // return one + (vec4(1.0) - one) * a / b; vec4 result = a / b; @@ -3703,7 +3659,40 @@ return a / b;`,$6=` } return result; -`,U6=iS(W6,$6,!0),B6={kernelName:pa,backendName:"webgl",kernelFunc:U6};class M6{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=` 
+`,p7=Sc({opSnippet:u7,packedOpSnippet:d7,checkOutOfBounds:!0}),m7={kernelName:xa,backendName:"webgl",kernelFunc:p7};class PC{constructor(e,t,n){this.variableNames=["real","imag"];const s=t[1];this.outputShape=t;const i=n?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,o=n?`${s}.0`:"1.0";let a;if(e==="real")a="return real * expR - imag * expI;";else if(e==="imag")a="return real * expI + imag * expR;";else throw new Error(`FFT component must be either "real" or "imag", got ${e}.`);this.userCode=` + const float exponentMultiplier = ${i}; + + float unaryOpComplex(float real, float expR, float imag, float expI) { + ${a} + } + + float mulMatDFT(int batch, int index) { + float indexRatio = float(index) / float(${s}); + float exponentMultiplierTimesIndexRatio = + exponentMultiplier * indexRatio; + + float result = 0.0; + + for (int i = 0; i < ${s}; i++) { + // x = (-2|2 * PI / N) * index * i; + float x = exponentMultiplierTimesIndexRatio * float(i); + float expR = cos(x); + float expI = sin(x); + float real = getReal(batch, i); + float imag = getImag(batch, i); + + result += + unaryOpComplex(real, expR, imag, expI) / ${o}; + } + + return result; + } + + void main() { + ivec2 coords = getOutputCoords(); + setOutput(mulMatDFT(coords[0], coords[1])); + } + `}}function zC(e,t,n){const s=n.texData.get(e.dataId),i=P(e.shape),o=e.shape[e.shape.length-1],a=i/o,c=dr({inputs:{x:e},backend:n,attrs:{shape:[a,o]}}),h=c.shape,d=new PC("real",h,t),m=new PC("imag",h,t),f=[{dataId:s.complexTensorInfos.real.dataId,dtype:s.complexTensorInfos.real.dtype,shape:h},{dataId:s.complexTensorInfos.imag.dataId,dtype:s.complexTensorInfos.imag.dtype,shape:h}],b=n.runWebGLProgram(d,f,"float32"),w=n.runWebGLProgram(m,f,"float32"),L=Lc({inputs:{real:b,imag:w},backend:n});n.disposeIntermediateTensorInfo(b),n.disposeIntermediateTensorInfo(w);const x=dr({inputs:{x:L},backend:n,attrs:{shape:e.shape}});return n.disposeIntermediateTensorInfo(x),x}function f7(e){const{inputs:t,backend:n}=e,{input:s}=t;return zC(s,!1,n)}const g7={kernelName:pd,backendName:"webgl",kernelFunc:f7};class y7{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; @@ -3717,7 +3706,7 @@ return a / b;`,$6=` } setOutput(outputValue); } - `}}const P6={kernelName:Pu,backendName:"webgl",kernelFunc:({inputs:e,backend:t})=>{const{image:n}=e,s=t,i=new M6(n.shape),o=s.runWebGLProgram(i,[n],n.dtype);return o}};class z6{constructor(e){this.variableNames=["A"];const t=Wn(),[n,s]=e;this.outputShape=e,this.userCode=` + `}}const b7={kernelName:md,backendName:"webgl",kernelFunc:({inputs:e,backend:t})=>{const{image:n}=e,s=t,i=new y7(n.shape),o=s.runWebGLProgram(i,[n],n.dtype);return o}};class w7{constructor(e){this.variableNames=["A"];const t=Pn(),[n,s]=e;this.outputShape=e,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; @@ -3739,7 +3728,7 @@ return a / b;`,$6=` setOutput(floor(value * 255.0 + 0.5)); } - `}}class G6{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const t=Wn(),[n,s]=e;this.outputShape=e,this.userCode=` + `}}class L7{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const t=Pn(),[n,s]=e;this.outputShape=e,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; @@ -3773,12 +3762,65 @@ return a / b;`,$6=` ${t.output} = result; } - `}}const V6={kernelName:Qu,backendName:"webgl",kernelFunc:Y6};let hc;function 
Y6(e){const{inputs:t,backend:n,attrs:s}=e;let{pixels:i}=t;const{numChannels:o}=s,a=typeof HTMLVideoElement!="undefined"&&i instanceof HTMLVideoElement,c=typeof HTMLImageElement!="undefined"&&i instanceof HTMLImageElement,[h,d]=a?[i.videoWidth,i.videoHeight]:[i.width,i.height],m=[d,h],y=[d,h,o];(c||a)&&(hc==null&&(hc=document.createElement("canvas").getContext("2d")),hc.canvas.width=h,hc.canvas.height=d,hc.drawImage(i,0,0,h,d),i=hc.canvas);const b=n.makeTensorInfo(m,"int32");n.texData.get(b.dataId).usage=As.PIXELS,n.gpgpu.uploadPixelDataToTexture(n.getTexture(b.dataId),i);const w=C().getBool("WEBGL_PACK")?new G6(y):new z6(y),L=n.runWebGLProgram(w,[b],"int32");return n.disposeData(b.dataId),L}function H6(e){const t=[];for(;t.length===0||t[t.length-1].outSize!==1;){const n=t.length?t[t.length-1].outSize:e[1],s=Jl(n);t.push({inSize:n,windowSize:s,outSize:Math.ceil(n/s)})}return t}function q6(e,t,n,s){const i=H6(e.shape);let o=e;for(let a=0;a`The new shape (${h}) has ${d} elements and the old shape (${i.shape}) has ${c} elements. The new shape and old shape must have the same number of elements.`);const m=a.texData.get(i.dataId);return m.isPacked&&!Zp(i.shape,h)&&!(m.texture!==null&&Zp(m.shape,h))?j6(i,h,a):(a.incRef(i.dataId),{dataId:i.dataId,shape:h,dtype:i.dtype})}const K6={kernelName:yl,backendName:"webgl",kernelFunc:oS};function X6(e,t,n,s){const i=we(t),o=we(e.shape),a=o/i,c=oS({inputs:{x:e},attrs:{shape:[a,i]},backend:s}),h=q6(c,e.dtype,"max",s),d=oS({inputs:{x:h},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(c),s.disposeIntermediateTensorInfo(h),d}class J6{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o0&&(d=` + if (inIdx < 0 || inIdx >= ${i}) { + return 0.0; + } + `),this.userCode=` + const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); + + float getValue(int batch, int inIdx) { + ${d} + return getX(batch, inIdx); + } + + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int outIdx = coords[1]; + int inOffset = outIdx * ${n}; + + float sumValue = 0.0; + + for (int i = 0; i < ${a}; i += 4) { + int inIdx = inOffset + i; + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + getValue(batch, inIdx + 3) + ); + + ${h} + } + + int inIdx = inOffset + ${a}; + if (${c===1}) { + vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0); + + ${h} + } else if (${c===2}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), 0.0, 0.0); + + ${h} + } else if (${c===3}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), 0.0); + + ${h} + } + setOutput(sumValue); + } + `}}function A7(e){const t=[];for(;t.length===0||t[t.length-1].outSize!==1;){const n=t.length?t[t.length-1].outSize:e[1],s=uh(n);t.push({inSize:n,windowSize:s,outSize:Math.ceil(n/s)})}return t}function GC(e,t,n,s){const i=A7(e.shape);let o=e;for(let a=0;a6)throw Error(`Transpose for rank ${t} is not yet supported`);const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],s=new Array(t);for(let i=0;i6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);const s=Et(this.rank),i=v0("rc",this.rank),o=new Array(this.rank);for(let d=0;d6)throw Error(`Transpose for rank ${t} is not yet supported`);const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],s=new Array(t);for(let i=0;i6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);const 
s=Rt(this.rank),i=J0("rc",this.rank),o=new Array(this.rank);for(let d=0;d{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n,c=s.shape.length,h=ft(i,s.shape);let d=h;const m=kn(d,c),y=m!=null,b=a.shouldExecuteOnCPU([s]);let w=s;if(y){if(b){const E=a.texData.get(w.dataId),D=E.values,F=new Array(c);for(let $=0;$`Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Fn(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ot(m.inShape,m.outShape))return rS({inputs:{x:i},backend:n});const y=new Zh(m,"max",!1);return n.runWebGLProgram(y,[i],i.dtype)}const nX={kernelName:ml,backendName:"webgl",kernelFunc:tX};function sX(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;Xh([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:y}=s,b=Fn(c.shape,h,d,1,m,y),w=!0,L=new Zh(b,"max",w),T=n.runWebGLProgram(L,[c],c.dtype),A=new J5(b),N=n.runWebGLProgram(A,[i,T],c.dtype);return n.disposeIntermediateTensorInfo(T),N}const iX={kernelName:Gu,backendName:"webgl",kernelFunc:sX};function rX(e,t,n,s){let i=new Zh(n,"max",!1);const o=s.runWebGLProgram(i,[e],"float32");i=new Zh(n,"max",!0,!0,t);const a=s.runWebGLProgram(i,[e],"float32");return[o,a]}const oX={kernelName:Vu,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;k(s.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`);const d=[1,1];k(rn(o,d),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${o} and dilations '${d}'`);const m=Fn(s.shape,i,o,d,a),[y,b]=rX(s,c,m,h);return[y,b]}};const aX={kernelName:Og,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Pa("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=n,h=t,d=h.readSync(s.dataId),m=h.readSync(i.dataId),y=o,b=a,w=c;return tp(d,m,y,b,w)}};const cX=np,lX={kernelName:Hu,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Pa("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t,m=d.readSync(s.dataId),y=d.readSync(i.dataId),{selectedIndices:b,validOutputs:w}=cX(m,y,o,a,c,h);return[b,w]}};const hX=sp,uX={kernelName:qu,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Pa("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t,m=d.readSync(s.dataId),y=d.readSync(i.dataId),b=o,w=a,L=c,T=h,{selectedIndices:A,selectedScores:N}=hX(m,y,b,w,L,T);return[A,N]}};class dX{constructor(e,t,n,s){this.variableNames=["Image"],this.outputShape=[];const i=e[1],o=e[2],a=Math.sin(t).toFixed(3),c=Math.cos(t).toFixed(3);this.outputShape=e;const[h,d]=Cb(s,i,o),m=h.toFixed(3),y=d.toFixed(3);let b="";typeof n=="number"?b=`float outputValue = ${n.toFixed(2)};`:b=` + `}}function CS(e,t,n){const s=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new R7(e.shape,t):new N7(e.shape,t);return n.runWebGLProgram(s,[e],e.dtype)}const O7={kernelName:Rl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n,c=s.shape.length,h=qe(i,s.shape);let d=h;const m=Xn(d,c),f=m!=null,b=a.shouldExecuteOnCPU([s]);let w=s;if(f){if(b){const O=a.texData.get(w.dataId),E=O.values,k=new Array(c);for(let $=0;$`Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))return ur({inputs:{x:i},backend:n});const f=new mu(m,"max",!1);return n.runWebGLProgram(f,[i],i.dtype)}const D7={kernelName:Ol,backendName:"webgl",kernelFunc:E7};function k7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;du([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:f}=s,b=Un(c.shape,h,d,1,m,f),w=!0,L=new mu(b,"max",w),x=n.runWebGLProgram(L,[c],c.dtype),v=new u6(b),N=n.runWebGLProgram(v,[i,x],c.dtype);return n.disposeIntermediateTensorInfo(x),N}const F7={kernelName:bd,backendName:"webgl",kernelFunc:k7};function _7(e,t,n,s){let i=new mu(n,"max",!1);const o=s.runWebGLProgram(i,[e],"float32");i=new mu(n,"max",!0,!0,t);const a=s.runWebGLProgram(i,[e],"float32");return[o,a]}const W7={kernelName:wd,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;A(s.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`);const d=[1,1];A(cn(o,d),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${d}'`);const m=Un(s.shape,i,o,d,a),[f,b]=_7(s,c,m,h);return[f,b]}};function $7(e,t,n,s){const i=P(t),o=P(e.shape),a=o/i,c=dr({inputs:{x:e},attrs:{shape:[a,i]},backend:s}),h=GC(c,"float32","mean",s),d=dr({inputs:{x:h},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(c),s.disposeIntermediateTensorInfo(h),d}const U7={kernelName:hy,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{keepDims:i,axis:o}=t,a=n,c=s.shape.length,h=qe(o,s.shape);let d=h;const m=Xn(d,c),f=m!=null,b=a.shouldExecuteOnCPU([s]),w=[];let L=s;if(f){if(b){const E=a.texData.get(L.dataId),k=E.values,F=new Array(c);for(let Y=0;Yd[0]+e[m]+d[1]);const s=e.length,i=Rt(s),o=t.map(d=>d[0]).join(","),a=t.map((d,m)=>d[0]+e[m]).join(","),c=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s),h=n==="reflect"?0:1;if(s===1){this.userCode=` + int start = ${o}; + int end = ${a}; + + void main() { + int outC = getOutputCoords(); + if (outC < start) { + outC = start * 2 - outC - ${h}; + } else if(outC >= end) { + outC = (end - 1) * 2 - outC + ${h}; + } + setOutput(getX(outC - start)); + } + `;return}this.userCode=` + ${i} start = ${i}(${o}); + ${i} end = ${i}(${a}); + + void main() { + ${i} outC = getOutputCoords(); + for (int i = 0; i < ${s}; i++) { + if (outC[i] < start[i]) { + outC[i] = start[i] * 2 - outC[i] - ${h}; + } else if(outC[i] >= end[i]) { + outC[i] = (end[i] - 1) * 2 - outC[i] + ${h}; + } + } + ${i} coords = outC - start; + setOutput(getX(${c})); + } + `}}class M7{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map((w,L)=>w[0]+e[L]+w[1]);const s=e.length,i=Rt(s),o=t.map(w=>w[0]).join(","),a=t.map((w,L)=>w[0]+e[L]).join(","),c=Mn("rc",s),h=Mn("source",s),d=`${c[s-1]} < ${this.outputShape[s-1]}`,m=s===1?"source":`vec2(${h.slice(-2).join()})`,f=n==="reflect"?0:1;let b="";if(s===1){const w=` + ${i} source = rc; + if (source < start) { + source = start * 2 - source - ${f}; + } else if (source >= end) { + source = (end - 1) * 2 - source + ${f}; + } + source -= start; + `;b=` + ${i} rc = outputLoc; + ${w} + result[0] = getChannel(getX(${h.join()}), ${m}); + ${c[s-1]} += 1; + if(${d}) { + ${w} + result[1] = getChannel(getX(${h.join()}), ${m}); + } + `}else{const w=` + ${i} source = rc; + ${i} lt = ${i}(lessThan(source, start)); + ${i} gte = ${i}(greaterThanEqual(source, end)); + ${i} orig = 1 - (lt + gte); + source = orig * source + + lt * (start * 2 - source - ${f}) + + gte * ((end - 1) * 2 - source + ${f}); + source -= start; + `;b=` + ${i} rc = outputLoc; + ${w} + result[0] = getChannel(getX(${h.join()}), ${m}); + ${c[s-1]} += 1; + if(${d}) { + ${w} + result[1] = getChannel(getX(${h.join()}), ${m}); + } + rc = outputLoc; + ${c[s-2]} += 1; + if(${c[s-2]} < ${this.outputShape[s-2]}) { + ${w} + result[2] = getChannel(getX(${h.join()}), ${m}); + ${c[s-1]} += 1; + if(${d}) { + ${w} + result[3] = getChannel(getX(${h.join()}), ${m}); + } + } + `}this.userCode=` + const ${i} start = ${i}(${o}); + const ${i} end = ${i}(${a}); + + void main() { + ${i} outputLoc = getOutputCoords(); + vec4 result = vec4(0.); + ${b} + setOutput(result); + } + `}}const P7=({inputs:e,backend:t,attrs:n})=>{const{x:s}=e,{paddings:i,mode:o}=n,a=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new M7(s.shape,i,o):new B7(s.shape,i,o),c=t.runWebGLProgram(a,[s],s.dtype);return c},z7={kernelName:El,backendName:"webgl",kernelFunc:P7};const YC={REAL:"return areal * breal - aimag * bimag;",IMAG:"return areal * bimag + aimag * 
breal;"};class HC{constructor(e,t,n){this.variableNames=["AReal","AImag","BReal","BImag"],this.outputShape=nt(t,n),this.userCode=` + float binaryOpComplex( + float areal, float aimag, float breal, float bimag) { + ${e} + } + + void main() { + float areal = getARealAtOutCoords(); + float aimag = getAImagAtOutCoords(); + float breal = getBRealAtOutCoords(); + float bimag = getBImagAtOutCoords(); + setOutput(binaryOpComplex(areal, aimag, breal, bimag)); + } + `}}const qC="return a * b;";function V7(e){const{inputs:t,backend:n}=e,{a:s,b:i}=t,o=$n(s.dtype,i.dtype);if(s.dtype==="complex64"){const c=n.texData.get(s.dataId),h=n.texData.get(i.dataId),d=new HC(YC.REAL,s.shape,i.shape),m=new HC(YC.IMAG,s.shape,i.shape),f=[{dataId:c.complexTensorInfos.real.dataId,dtype:c.complexTensorInfos.real.dtype,shape:s.shape},{dataId:c.complexTensorInfos.imag.dataId,dtype:c.complexTensorInfos.imag.dtype,shape:s.shape},{dataId:h.complexTensorInfos.real.dataId,dtype:h.complexTensorInfos.real.dtype,shape:i.shape},{dataId:h.complexTensorInfos.imag.dataId,dtype:h.complexTensorInfos.imag.dtype,shape:i.shape}],b=n.runWebGLProgram(d,f,"float32"),w=n.runWebGLProgram(m,f,"float32"),L=Lc({inputs:{real:b,imag:w},backend:n});return n.disposeIntermediateTensorInfo(b),n.disposeIntermediateTensorInfo(w),L}if(n.shouldExecuteOnCPU([s,i])){const c=n.texData.get(s.dataId),h=n.texData.get(i.dataId),[d,m]=JK(s.shape,i.shape,c.values,h.values,o),f=n.makeTensorInfo(m,o),b=n.texData.get(f.dataId);return b.values=d,f}let a;return oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?a=new lr(qC,s.shape,i.shape):a=new _n(qC,s.shape,i.shape),n.runWebGLProgram(a,[s,i],o)}const G7={kernelName:Ta,backendName:"webgl",kernelFunc:V7};const Y7={kernelName:fy,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=n,h=t,d=h.readSync(s.dataId),m=h.readSync(i.dataId),f=o,b=a,w=c;return Dp(d,m,f,b,w)}};const H7=kp,q7={kernelName:Ld,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t,m=d.readSync(s.dataId),f=d.readSync(i.dataId),{selectedIndices:b,validOutputs:w}=H7(m,f,o,a,c,h);return[b,w]}};const j7=Fp,K7={kernelName:Sd,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t,m=d.readSync(s.dataId),f=d.readSync(i.dataId),b=o,w=a,L=c,x=h,{selectedIndices:v,selectedScores:N}=j7(m,f,b,w,L,x);return[v,N]}};class X7{constructor(e,t,n,s){this.variableNames=["Image"],this.outputShape=[];const i=e[1],o=e[2],a=Math.sin(t).toFixed(3),c=Math.cos(t).toFixed(3);this.outputShape=e;const[h,d]=Jb(s,i,o),m=h.toFixed(3),f=d.toFixed(3);let b="";typeof n=="number"?b=`float outputValue = ${n.toFixed(2)};`:b=` vec3 fill = vec3(${n.join(",")}); float outputValue = fill[coords[3]];`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; int y = coords[1]; - float coordXFloat = (float(x) - ${m}) * ${c} - (float(y) - ${y}) * ${a}; - float coordYFloat = (float(x) - ${m}) * ${a} + (float(y) - ${y}) * ${c}; + float coordXFloat = (float(x) - ${m}) * ${c} - (float(y) - ${f}) * ${a}; + float coordYFloat = (float(x) - ${m}) * ${a} + (float(y) - ${f}) * ${c}; int coordX = int(round(coordXFloat + ${m})); - int coordY = int(round(coordYFloat + ${y})); + int coordY = int(round(coordYFloat + ${f})); ${b} if(coordX >= 0 && coordX < ${o} && coordY >= 0 && coordY < ${i}) { outputValue = getImage(coords[0], coordY, coordX, coords[3]); } setOutput(outputValue); } - `}}const pX={kernelName:ed,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=new dX(s.shape,i,o,a),d=c.runWebGLProgram(h,[s],s.dtype);return d}};const mX=yC+` + `}}const J7={kernelName:Od,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=new X7(s.shape,i,o,a),d=c.runWebGLProgram(h,[s],s.dtype);return d}};const Z7=$C+` return sin(x); -`,fX=am(mX),gX={kernelName:ma,backendName:"webgl",kernelFunc:fX};const yX="return x * x;",bX=am(yX),wX={kernelName:Ju,backendName:"webgl",kernelFunc:bX};const wC="return (a - b) * (a - b);",LX=iS(wC,wC),SX={kernelName:fa,backendName:"webgl",kernelFunc:LX};const IX="return tan(x);",xX=am(IX),TX={kernelName:ga,backendName:"webgl",kernelFunc:xX};const AX={kernelName:Cl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{perm:i}=t,o=n,a=s.shape.length,c=new Array(a);for(let d=0;d{hm($X,{isNodejs:()=>UX});function UX(){return typeof global=="object"&&!0&&typeof TC!="undefined"&&typeof process!="undefined"&&!!process.version}});function cr(r,l,u=!1){if(r.beginPath(),l.slice(1).forEach(({x:p,y:g},f)=>{const I=l[f];r.moveTo(I.x,I.y),r.lineTo(p,g)}),u){const p=l[l.length-1],g=l[0];if(!p||!g)return;r.moveTo(p.x,p.y),r.lineTo(g.x,g.y)}r.stroke()}class us{constructor(r,l){if(!ui(r)||!ui(l))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width:r,height:l})}`);this._width=r,this._height=l}get width(){return this._width}get height(){return this._height}reverse(){return new us(1/this.width,1/this.height)}}const hS={};hm(hS,{computeReshapedDimensions:()=>pS,getCenterPoint:()=>Go,isDimensions:()=>pm,isEven:()=>dm,isFloat:()=>dS,isTensor:()=>Po,isTensor1D:()=>BX,isTensor2D:()=>uS,isTensor3D:()=>lr,isTensor4D:()=>Ns,isValidNumber:()=>ui,isValidProbablitiy:()=>fc,range:()=>Wi,round:()=>zo});const vC=Ke(Xe());function Po(r,l){return r instanceof vC.Tensor&&r.shape.length===l}function BX(r){return Po(r,1)}function uS(r){return Po(r,2)}function lr(r){return Po(r,3)}function Ns(r){return Po(r,4)}function dS(r){return r%1!==0}function 
dm(r){return r%2===0}function zo(r,l=2){const u=Math.pow(10,l);return Math.floor(r*u)/u}function pm(r){return r&&r.width&&r.height}function pS({width:r,height:l},u){const p=u/Math.max(l,r);return new us(Math.round(r*p),Math.round(l*p))}function Go(r){return r.reduce((l,u)=>l.add(u),new Je(0,0)).div(new Je(r.length,r.length))}function Wi(r,l,u){return Array(r).fill(0).map((p,g)=>l+g*u)}function ui(r){return!!r&&r!==Infinity&&r!==-Infinity&&!isNaN(r)||r===0}function fc(r){return ui(r)&&0<=r&&r<=1}class Je{constructor(r,l){this._x=r,this._y=l}get x(){return this._x}get y(){return this._y}add(r){return new Je(this.x+r.x,this.y+r.y)}sub(r){return new Je(this.x-r.x,this.y-r.y)}mul(r){return new Je(this.x*r.x,this.y*r.y)}div(r){return new Je(this.x/r.x,this.y/r.y)}abs(){return new Je(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Je(Math.floor(this.x),Math.floor(this.y))}}class Ct{static isRect(r){return!!r&&[r.x,r.y,r.width,r.height].every(ui)}static assertIsValidBox(r,l,u=!1){if(!Ct.isRect(r))throw new Error(`${l} - invalid box: ${JSON.stringify(r)}, expected object with properties x, y, width, height`);if(!u&&(r.width<0||r.height<0))throw new Error(`${l} - width (${r.width}) and height (${r.height}) must be positive numbers`)}constructor(r,l=!0){const u=r||{},p=[u.left,u.top,u.right,u.bottom].every(ui),g=[u.x,u.y,u.width,u.height].every(ui);if(!g&&!p)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(u)}`);const[f,I,S,x]=g?[u.x,u.y,u.width,u.height]:[u.left,u.top,u.right-u.left,u.bottom-u.top];Ct.assertIsValidBox({x:f,y:I,width:S,height:x},"Box.constructor",l),this._x=f,this._y=I,this._width=S,this._height=x}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Je(this.left,this.top)}get topRight(){return new Je(this.right,this.top)}get bottomLeft(){return new Je(this.left,this.bottom)}get bottomRight(){return new Je(this.right,this.bottom)}round(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(g=>Math.round(g));return new Ct({x:r,y:l,width:u,height:p})}floor(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(g=>Math.floor(g));return new Ct({x:r,y:l,width:u,height:p})}toSquare(){let{x:r,y:l,width:u,height:p}=this;const g=Math.abs(u-p);return ul&&(I=-O+l+u,O=l),C>r&&(S=-C+r+p,C=r),x<1&&(S=2-x,x=1),v<1&&(S=2-v,v=1),{dy:f,edy:S,dx:g,edx:I,y:v,ey:C,x,ex:O,w:u,h:p}}calibrate(r){return new Ct({left:this.left+r.left*this.width,top:this.top+r.top*this.height,right:this.right+r.right*this.width,bottom:this.bottom+r.bottom*this.height}).toSquare().round()}}class tu extends Ct{constructor(r,l,u,p,g=!1){super({left:r,top:l,right:u,bottom:p},g)}}class gc{constructor(r,l,u,p,g){this._imageDims=new us(g.width,g.height),this._score=r,this._classScore=l,this._className=u,this._box=new Ct(p).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Ct(this._box).rescale(this.imageDims.reverse())}forSize(r,l){return new 
gc(this.score,this.classScore,this.className,this.relativeBox,{width:r,height:l})}}class Ht extends gc{constructor(r,l,u){super(r,r,"",l,u)}forSize(r,l){const{score:u,relativeBox:p,imageDims:g}=super.forSize(r,l);return new Ht(u,p,g)}}function mS(r,l,u=!0){const p=Math.max(0,Math.min(r.right,l.right)-Math.max(r.left,l.left)),g=Math.max(0,Math.min(r.bottom,l.bottom)-Math.max(r.top,l.top)),f=p*g;return u?f/(r.area+l.area-f):f/Math.min(r.area,l.area)}function fS(r){const l=r.map(S=>S.x),u=r.map(S=>S.y),p=l.reduce((S,x)=>xxSS({score:I,boxIndex:S})).sort((I,S)=>I.score-S.score).map(I=>I.boxIndex);const f=[];for(;g.length>0;){const I=g.pop();f.push(I);const S=g,x=[];for(let v=0;vx[O]<=u)}return f}const $i=Ke(Xe());function di(r,l){return $i.tidy(()=>{const[u,p,g]=l,f=$i.fill([...r.shape.slice(0,3),1],u,"float32"),I=$i.fill([...r.shape.slice(0,3),1],p,"float32"),S=$i.fill([...r.shape.slice(0,3),1],g,"float32"),x=$i.concat([f,I,S],3);return $i.sub(r,x)})}const jr=Ke(Xe());function yS(r,l=!1){return jr.tidy(()=>{const[u,p]=r.shape.slice(1);if(u===p)return r;const g=Math.abs(u-p),f=Math.round(g*(l?.5:1)),I=u>p?2:1,S=U=>{const G=r.shape.slice();return G[I]=U,jr.fill(G,0,"float32")},x=S(f),v=g-x.shape[I],O=l&&v?S(v):null,C=[O,r,x].filter(U=>!!U).map(U=>jr.cast(U,"float32"));return jr.concat(C,I)})}function MX(r){const l=r.slice();for(let u=l.length-1;u>0;u--){const p=Math.floor(Math.random()*(u+1)),g=l[u];l[u]=l[p],l[p]=g}return l}function nu(r){return 1/(1+Math.exp(-r))}function PX(r){return Math.log(r/(1-r))}class su extends Ct{constructor(r,l,u,p,g=!1){super({x:r,y:l,width:u,height:p},g)}}const zX=.5,GX=.43,VX=.45;class Gs{constructor(r,l,u=new Je(0,0)){const{width:p,height:g}=l;this._imgDims=new us(p,g),this._shift=u,this._positions=r.map(f=>f.mul(new Je(p,g)).add(u))}get shift(){return new Je(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(r=>r.sub(this._shift).div(new Je(this.imageWidth,this.imageHeight)))}forSize(r,l){return new this.constructor(this.relativePositions,{width:r,height:l})}shiftBy(r,l){return new this.constructor(this.relativePositions,this._imgDims,new Je(r,l))}shiftByPoint(r){return this.shiftBy(r.x,r.y)}align(r,l={}){if(r){const g=r instanceof Ht?r.box.floor():new Ct(r);return this.shiftBy(g.x,g.y).align(null,l)}const{useDlibAlignment:u,minBoxPadding:p}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},l);return u?this.alignDlib():this.alignMinBbox(p)}alignDlib(){const r=this.getRefPointsForAlignment(),[l,u,p]=r,g=O=>p.sub(O).magnitude(),f=(g(l)+g(u))/2,I=Math.floor(f/VX),S=Go(r),x=Math.floor(Math.max(0,S.x-zX*I)),v=Math.floor(Math.max(0,S.y-GX*I));return new su(x,v,Math.min(I,this.imageWidth+x),Math.min(I,this.imageHeight+v))}alignMinBbox(r){const l=fS(this.positions);return l.pad(l.width*r,l.height*r)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}}class YX extends Gs{getRefPointsForAlignment(){const r=this.positions;return[r[0],r[1],Go([r[3],r[4]])]}}class iu extends Gs{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return 
this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(Go)}}class mm{constructor(r,l){this._label=r,this._distance=l}get label(){return this._label}get distance(){return this._distance}toString(r=!0){return`${this.label}${r?` (${zo(this.distance)})`:""}`}}class fm extends Ct{static assertIsValidLabeledBox(r,l){if(Ct.assertIsValidBox(r,l),!ui(r.label))throw new Error(`${l} - expected property label (${r.label}) to be a number`)}constructor(r,l){super(r);this._label=l}get label(){return this._label}}class Vo{constructor(r,l){if(!(typeof r=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(l)||l.some(u=>!(u instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=r,this._descriptors=l}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(r=>Array.from(r))}}static fromJSON(r){const l=r.descriptors.map(u=>new Float32Array(u));return new Vo(r.label,l)}}class HX extends fm{static assertIsValidPredictedBox(r,l){if(fm.assertIsValidLabeledBox(r,l),!fc(r.score)||!fc(r.classScore))throw new Error(`${l} - expected properties score (${r.score}) and (${r.classScore}) to be a number between [0, 1]`)}constructor(r,l,u,p){super(r,l);this._score=u,this._classScore=p}get score(){return this._score}get classScore(){return this._classScore}}function Ui(r){return r.detection instanceof Ht}function Yo(r,l){const u={detection:l};return Object.assign({},r,u)}function bS(){const r=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},l=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:r,readFile:l}}function gm(r){let l="";if(!r)try{r=require("fs")}catch(p){l=p.toString()}const u=r?function(p){return new Promise((g,f)=>{r.readFile(p,function(I,S){return I?f(I):g(S)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${l}`)};return{readFile:u}}function wS(){const r=global.Canvas||global.HTMLCanvasElement,l=global.Image||global.HTMLImageElement,u=function(){if(r)return new r;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},p=function(){if(l)return new l;throw new Error("createImageElement - missing Image implementation for nodejs environment")},g=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},f=gm();return{Canvas:r||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:l||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:u,createImageElement:p,fetch:g,...f}}function LS(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}const SS=Ke(AC());let gn;function qX(){if(!gn)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return gn}function 
IS(r){gn=r}function xS(){if(LS())return IS(bS());if(SS.isNodejs())return IS(wS())}function jX(r){if(gn||xS(),!gn)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");const{Canvas:l=gn.Canvas,Image:u=gn.Image}=r;gn.Canvas=l,gn.Image=u,gn.createCanvasElement=r.createCanvasElement||(()=>new l),gn.createImageElement=r.createImageElement||(()=>new u),gn.ImageData=r.ImageData||gn.ImageData,gn.Video=r.Video||gn.Video,gn.fetch=r.fetch||gn.fetch,gn.readFile=r.readFile||gn.readFile}const gt={getEnv:qX,setEnv:IS,initialize:xS,createBrowserEnv:bS,createFileSystem:gm,createNodejsEnv:wS,monkeyPatch:jX,isBrowser:LS,isNodejs:SS.isNodejs};xS();function Ho(r){return!gt.isNodejs()&&typeof r=="string"?document.getElementById(r):r}function Jn(r){const{Canvas:l,CanvasRenderingContext2D:u}=gt.getEnv();if(r instanceof u)return r;const p=Ho(r);if(!(p instanceof l))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");const g=p.getContext("2d");if(!g)throw new Error("resolveContext2d - canvas 2d context is null");return g}var Bi;(function(r){r.TOP_LEFT="TOP_LEFT",r.TOP_RIGHT="TOP_RIGHT",r.BOTTOM_LEFT="BOTTOM_LEFT",r.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Bi||(Bi={}));class ym{constructor(r={}){const{anchorPosition:l,backgroundColor:u,fontColor:p,fontSize:g,fontStyle:f,padding:I}=r;this.anchorPosition=l||Bi.TOP_LEFT,this.backgroundColor=u||"rgba(0, 0, 0, 0.5)",this.fontColor=p||"rgba(255, 255, 255, 1)",this.fontSize=g||14,this.fontStyle=f||"Georgia",this.padding=I||4}}class yc{constructor(r,l,u={}){this.text=typeof r=="string"?[r]:r instanceof yc?r.text:r,this.anchor=l,this.options=new ym(u)}measureWidth(r){const{padding:l}=this.options;return this.text.map(u=>r.measureText(u).width).reduce((u,p)=>u{const G=S+O.x,ne=S+O.y+(U+1)*f;u.fillText(C,G,ne)})}}class NC{constructor(r={}){const{boxColor:l,lineWidth:u,label:p,drawLabelOptions:g}=r;this.boxColor=l||"rgba(0, 0, 255, 1)",this.lineWidth=u||2,this.label=p;const f={anchorPosition:Bi.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new ym(Object.assign({},f,g))}}class TS{constructor(r,l={}){this.box=new Ct(r),this.options=new NC(l)}draw(r){const l=Jn(r),{boxColor:u,lineWidth:p}=this.options,{x:g,y:f,width:I,height:S}=this.box;l.strokeStyle=u,l.lineWidth=p,l.strokeRect(g,f,I,S);const{label:x}=this.options;x&&new yc([x],{x:g-p/2,y:f},this.options.drawLabelOptions).draw(r)}}function KX(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const g=p instanceof Ht?p.score:Ui(p)?p.detection.score:void 0,f=p instanceof Ht?p.box:Ui(p)?p.detection.box:new Ct(p),I=g?`${zo(g)}`:void 0;new TS(f,{label:I}).draw(r)})}function ru(r){const{Image:l,Video:u}=gt.getEnv();return r instanceof l&&r.complete||r instanceof u&&r.readyState>=3}function AS(r){return new Promise((l,u)=>{if(r instanceof gt.getEnv().Canvas||ru(r))return l(null);function p(f){if(!f.currentTarget)return;f.currentTarget.removeEventListener("load",p),f.currentTarget.removeEventListener("error",g),l(f)}function g(f){if(!f.currentTarget)return;f.currentTarget.removeEventListener("load",p),f.currentTarget.removeEventListener("error",g),u(f)}r.addEventListener("load",p),r.addEventListener("error",g)})}function vS(r){return new Promise((l,u)=>{if(!(r instanceof Blob))return u("bufferToImage - expected buf to be of type: Blob");const p=new FileReader;p.onload=()=>{if(typeof p.result!="string")return u("bufferToImage - expected reader.result to be a string, in onload");const 
g=gt.getEnv().createImageElement();g.onload=()=>l(g),g.onerror=u,g.src=p.result},p.onerror=u,p.readAsDataURL(r)})}function qo(r){const{Image:l,Video:u}=gt.getEnv();return r instanceof l?new us(r.naturalWidth,r.naturalHeight):r instanceof u?new us(r.videoWidth,r.videoHeight):new us(r.width,r.height)}function bc({width:r,height:l}){const{createCanvasElement:u}=gt.getEnv(),p=u();return p.width=r,p.height=l,p}function ou(r,l){const{ImageData:u}=gt.getEnv();if(!(r instanceof u)&&!ru(r))throw new Error("createCanvasFromMedia - media has not finished loading yet");const{width:p,height:g}=l||qo(r),f=bc({width:p,height:g});return r instanceof u?Jn(f).putImageData(r,0,0):Jn(f).drawImage(r,0,0,p,g),f}const bm=Ke(Xe());async function NS(r,l){const u=l||gt.getEnv().createCanvasElement(),[p,g,f]=r.shape.slice(Ns(r)?1:0),I=bm.tidy(()=>r.as3D(p,g,f).toInt());return await bm.browser.toPixels(I,u),I.dispose(),u}function wm(r){const{Image:l,Canvas:u,Video:p}=gt.getEnv();return r instanceof l||r instanceof u||r instanceof p}const XX=1e-7,JX=1e-4;class CC{time(r){return se("time")}read(r){return se("read")}readSync(r){return se("readSync")}numDataIds(){return se("numDataIds")}disposeData(r){return se("disposeData")}write(r,l,u){return se("write")}move(r,l,u,p){return se("move")}memory(){return se("memory")}floatPrecision(){return se("floatPrecision")}epsilon(){return this.floatPrecision()===32?XX:JX}batchMatMul(r,l,u,p){return se("batchMatMul")}fusedBatchMatMul({a:r,b:l,transposeA:u,transposeB:p,bias:g,activation:f,preluActivationWeights:I}){return se("fusedBatchMatMul")}slice(r,l,u){return se("slice")}stridedSlice(r,l,u,p){return se("stridedSlice")}unstack(r,l){return se("unstack")}reverse(r,l){return se("reverse")}concat(r,l){return se("concat")}neg(r){return se("neg")}add(r,l){return se("add")}addN(r){return se("addN")}subtract(r,l){return se("subtract")}multiply(r,l){return se("multiply")}realDivide(r,l){return se("realDivide")}floorDiv(r,l){return se("floorDiv")}sum(r,l){return se("sum")}prod(r,l){return se("prod")}unsortedSegmentSum(r,l,u){return se("unsortedSegmentSum")}argMin(r,l){return se("argMin")}argMax(r,l){return se("argMax")}equal(r,l){return se("equal")}notEqual(r,l){return se("notEqual")}less(r,l){return se("less")}lessEqual(r,l){return se("lessEqual")}greater(r,l){return se("greater")}greaterEqual(r,l){return se("greaterEqual")}logicalNot(r){return se("logicalNot")}logicalAnd(r,l){return se("logicalAnd")}logicalOr(r,l){return se("logicalOr")}where(r){return se("where")}select(r,l,u){return se("select")}topk(r,l,u){return se("topk")}min(r,l){return se("min")}minimum(r,l){return se("minimum")}mod(r,l){return se("mod")}max(r,l){return se("max")}maximum(r,l){return se("maximum")}all(r,l){return se("all")}any(r,l){return se("any")}squaredDifference(r,l){return se("squaredDifference")}ceil(r){return se("ceil")}floor(r){return se("floor")}round(r){return se("round")}sign(r){return se("sign")}isNaN(r){return se("isNaN")}isInf(r){return se("isInf")}isFinite(r){return se("isFinite")}pow(r,l){return se("pow")}exp(r){return se("exp")}expm1(r){return se("expm1")}softmax(r,l){return se("softmax")}log(r){return se("log")}log1p(r){return se("log1p")}sqrt(r){return se("sqrt")}rsqrt(r){return se("rsqrt")}square(r){return se("square")}reciprocal(r){return se("reciprocal")}relu(r){return se("relu")}relu6(r){return se("relu6")}prelu(r,l){return se("prelu")}elu(r){return se("elu")}eluDer(r,l){return se("eluDer")}selu(r){return se("selu")}int(r){return se("int")}clip(r,l,u){return se("clip")}abs(r){return 
se("abs")}complexAbs(r){return se("complexAbs")}sigmoid(r){return se("sigmoid")}softplus(r){return se("softplus")}sin(r){return se("sin")}cos(r){return se("cos")}tan(r){return se("tan")}asin(r){return se("asin")}acos(r){return se("acos")}atan(r){return se("atan")}atan2(r,l){return se("atan2")}sinh(r){return se("sinh")}cosh(r){return se("cosh")}tanh(r){return se("tanh")}asinh(r){return se("asinh")}acosh(r){return se("acosh")}atanh(r){return se("atanh")}erf(r){return se("erf")}step(r,l){return se("step")}fusedConv2d({input:r,filter:l,convInfo:u,bias:p,activation:g,preluActivationWeights:f}){return se("fusedConv2d")}conv2d(r,l,u){return se("conv2d")}conv2dDerInput(r,l,u){return se("conv2dDerInput")}conv2dDerFilter(r,l,u){return se("conv2dDerFilter")}fusedDepthwiseConv2D({input:r,filter:l,convInfo:u,bias:p,activation:g,preluActivationWeights:f}){return se("fusedDepthwiseConv2D")}depthwiseConv2D(r,l,u){return se("depthwiseConv2D")}depthwiseConv2DDerInput(r,l,u){return se("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(r,l,u){return se("depthwiseConv2DDerFilter")}conv3d(r,l,u){return se("conv3d")}conv3dDerInput(r,l,u){return se("conv3dDerInput")}conv3dDerFilter(r,l,u){return se("conv3dDerFilter")}maxPool(r,l){return se("maxPool")}maxPoolBackprop(r,l,u,p){return se("maxPoolBackprop")}avgPool(r,l){return se("avgPool")}avgPoolBackprop(r,l,u){return se("avgPoolBackprop")}avgPool3d(r,l){return se("avgPool3d")}avgPool3dBackprop(r,l,u){return se("avgPool3dBackprop")}maxPool3d(r,l){return se("maxPool3d")}maxPool3dBackprop(r,l,u,p){return se("maxPool3dBackprop")}reshape(r,l){return se("reshape")}cast(r,l){return se("cast")}tile(r,l){return se("tile")}pad(r,l,u){return se("pad")}transpose(r,l){return se("transpose")}gather(r,l,u){return se("gather")}gatherND(r,l){return se("gatherND")}scatterND(r,l,u){return se("scatterND")}batchToSpaceND(r,l,u){return se("batchToSpaceND")}spaceToBatchND(r,l,u){return se("spaceToBatchND")}resizeBilinear(r,l,u,p){return se("resizeBilinear")}resizeBilinearBackprop(r,l,u){return se("resizeBilinearBackprop")}resizeNearestNeighbor(r,l,u,p){return se("resizeNearestNeighbor")}resizeNearestNeighborBackprop(r,l,u){return se("resizeNearestNeighborBackprop")}batchNorm(r,l,u,p,g,f){return se("batchNorm")}localResponseNormalization4D(r,l,u,p,g){return se("localResponseNormalization4D")}LRNGrad(r,l,u,p,g,f,I){return se("LRNGrad")}multinomial(r,l,u,p){return se("multinomial")}oneHot(r,l,u,p){return se("oneHot")}cumsum(r,l,u,p){return se("cumsum")}nonMaxSuppression(r,l,u,p,g){return se("nonMaxSuppression")}fft(r){return se("fft")}ifft(r){return se("ifft")}complex(r,l){return se("complex")}real(r){return se("real")}imag(r){return se("imag")}cropAndResize(r,l,u,p,g,f){return se("cropAndResize")}depthToSpace(r,l,u){return se("depthToSpace")}split(r,l,u){return se("split")}sparseToDense(r,l,u,p){return se("sparseToDense")}diag(r){return se("diag")}fill(r,l,u){return se("fill")}onesLike(r){return se("onesLike")}zerosLike(r){return se("zerosLike")}linspace(r,l,u){return se("linspace")}dispose(){return se("dispose")}}function se(r){throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}const RC="tfjsflags";class OC{constructor(r){this.global=r,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(r,l){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. 
Overwriting the platform with ${l}.`),this.platformName=r,this.platform=l}registerFlag(r,l,u){if(this.flagRegistry[r]={evaluationFn:l,setHook:u},this.urlFlags[r]!=null){const p=this.urlFlags[r];console.warn(`Setting feature override from URL ${r}: ${p}.`),this.set(r,p)}}async getAsync(r){return r in this.flags?this.flags[r]:(this.flags[r]=await this.evaluateFlag(r),this.flags[r])}get(r){if(r in this.flags)return this.flags[r];const l=this.evaluateFlag(r);if(l instanceof Promise)throw new Error(`Flag ${r} cannot be synchronously evaluated. Please use getAsync() instead.`);return this.flags[r]=l,this.flags[r]}getNumber(r){return this.get(r)}getBool(r){return this.get(r)}getFlags(){return this.flags}get features(){return this.flags}set(r,l){if(this.flagRegistry[r]==null)throw new Error(`Cannot set flag ${r} as it has not been registered.`);this.flags[r]=l,this.flagRegistry[r].setHook!=null&&this.flagRegistry[r].setHook(l)}evaluateFlag(r){if(this.flagRegistry[r]==null)throw new Error(`Cannot evaluate flag '${r}': no evaluation function found.`);return this.flagRegistry[r].evaluationFn()}setFlags(r){this.flags=Object.assign({},r)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const r=ZX(this.global.location.search);if(RC in r){const l=r[RC].split(",");l.forEach(u=>{const[p,g]=u.split(":");this.urlFlags[p]=QX(p,g)})}}}function ZX(r){const l={};return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(u,...p)=>(e7(l,p[0],p[1]),p.join("="))),l}function e7(r,l,u){r[decodeURIComponent(l)]=decodeURIComponent(u||"")}function QX(r,l){if(l=l.toLowerCase(),l==="true"||l==="false")return l==="true";if(`${+l}`===l)return+l;throw new Error(`Could not parse value flag value ${l} for flag ${r}.`)}function Cs(){return EC}let EC=null;function DC(r){EC=r}let CS;function RS(){if(CS==null){let r;if(typeof window!="undefined")r=window;else if(typeof global!="undefined")r=global;else if(typeof process!="undefined")r=process;else if(typeof self!="undefined")r=self;else throw new Error("Could not find a global object");CS=r}return CS}function t7(){const r=RS();return r._tfGlobals==null&&(r._tfGlobals=new Map),r._tfGlobals}function OS(r,l){const u=t7();if(u.has(r))return u.get(r);{const p=l();return u.set(r,p),u.get(r)}}const 
Lm="Abs",kC="Acos",FC="Acosh",wc="Add",_C="AddN",WC="ArgMax",$C="ArgMin",UC="Asin",BC="Asinh",MC="Atan",PC="Atanh",zC="Atan2",GC="AvgPool",VC="AvgPoolBackprop",YC="AvgPool3D",HC="AvgPool3DBackprop",Sm="BatchMatMul",Im="BatchToSpaceND",xm="BroadcastTo",Lc="Cast",qC="Ceil",jC="ClipByValue",KC="Complex",Tm="Concat",Am="Conv2D",XC="Conv2DBackpropFilter",vm="Conv2DBackpropInput",JC="Conv3D",ZC="Conv3DBackpropFilterV2",QC="Conv3DBackpropInputV2",Nm="Cos",Cm="Cosh",Rm="Cumsum",e2="CropAndResize",t2="DepthwiseConv2dNative",n2="DepthwiseConv2dNativeBackpropFilter",s2="DepthwiseConv2dNativeBackpropInput",i2="Dilation2D",r2="Dilation2DBackpropInput",o2="Dilation2DBackpropFilter",Om="Div",a2="Elu",c2="EluGrad",l2="Erf",h2="Equal",Em="Exp",u2="Expm1",d2="FFT",p2="Fill",m2="FlipLeftRight",Dm="Floor",km="FloorDiv",f2="FusedBatchNorm",Fm="GatherV2",g2="Greater",_m="GreaterEqual",Wm="Identity",y2="IFFT",b2="Imag",w2="IsFinite",L2="IsInf",S2="IsNan",I2="Less",x2="LessEqual",$m="Log",Um="Log1p",T2="LogicalAnd",A2="LogicalNot",v2="LogSoftmax",N2="LRN",C2="LRNBackprop",Bm="Max",Mm="Maximum",R2="MaxPool",O2="MaxPoolBackprop",E2="MaxPool3D",D2="MaxPool3DBackprop",Pm="Min",zm="Minimum",k2="Mod",Gm="Multiply",Vm="Negate",F2="NotEqual",_2="NonMaxSuppressionV3",W2="NonMaxSuppressionV4",$2="NonMaxSuppressionV5",U2="OnesLike",B2="OneHot",Ym="PadV2",Hm="Pow",M2="Prelu",P2="Range",z2="Real",G2="Reciprocal",qm="Relu",jm="Reshape",Km="ResizeNearestNeighbor",V2="ResizeNearestNeighborGrad",Xm="ResizeBilinear",Y2="ResizeBilinearGrad",H2="Relu6",Jm="Reverse",q2="Round",Zm="Rsqrt",Qm="SelectV2",j2="Selu",ef="Slice",tf="Sin",nf="Sinh",K2="Sign",sf="Sigmoid",X2="Softplus",rf="Sqrt",of="Sum",af="SpaceToBatchND",cf="SplitV",J2="Softmax",lf="SquaredDifference",Z2="Square",hf="Sub",Q2="Tan",eR="Tanh",uf="Tile",df="Transpose",pf="Unpack",mf="UnsortedSegmentSum",ff="ZerosLike",gf="Step",ES="FromPixels",tR="RotateWithOffset";const nR=OS("kernelRegistry",()=>new Map),DS=OS("gradRegistry",()=>new Map);function yf(r,l){const u=n7(r,l);return nR.get(u)}function kS(r){return DS.get(r)}function FS(r){const l=nR.entries(),u=[];for(;;){const{done:p,value:g}=l.next();if(p)break;const[f,I]=g,[S]=f.split("_");S===r&&u.push(I)}return u}function sR(r){const{kernelName:l}=r;DS.has(l)&&(Cs().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${l}'`)),DS.set(l,r)}function n7(r,l){return`${l}_${r}`}function Z(r,l){if(!r)throw new Error(typeof l=="string"?l:l())}function Zt(r,l,u=""){Z(jo(r,l),()=>u+` Shapes ${r} and ${l} must match`)}function Sc(r){Z(r!=null,()=>"The input to the tensor constructor must be a non-null value.")}function Ic(r,l=[],u=!1){if(l==null&&(l=[]),Array.isArray(r)||Rs(r)&&!u)for(let p=0;p=0)u*=r[f];else if(r[f]===-1){if(p!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${p} and dim ${f}`);p=f}else if(r[f]<0)throw Error(`Shapes can not be < 0. Found ${r[f]} at dim ${f}`);if(p===-1){if(l>0&&l!==u)throw Error(`Size(${l}) must match the product of shape ${r}`);return r}if(u===0)throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`);if(l%u!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${l} / ${u}`);const g=r.slice();return g[p]=l/u,g}function ht(r,l){const u=l.length;return r=r==null?l.map((p,g)=>g):[].concat(r),Z(r.every(p=>p>=-u&&p`All values in axis param must be in range [-${u}, ${u}) but got axis ${r}`),Z(r.every(p=>Qt(p)),()=>`All values in axis param must be integers but got axis ${r}`),r.map(p=>p<0?u+p:p)}function rR(r,l){const u=[],p=[],g=l!=null&&Array.isArray(l)&&l.length===0,f=l==null||g?null:ht(l,r).sort();let I=0;for(let S=0;SS)&&r[S]===1&&(u.push(r[S]),p.push(S)),f[I]<=S&&I++}r[S]!==1&&(u.push(r[S]),p.push(S))}return{newShape:u,keptDims:p}}function oR(r,l){let u=null;if(r==null||r==="float32")u=new Float32Array(l);else if(r==="int32")u=new Int32Array(l);else if(r==="bool")u=new Uint8Array(l);else if(r==="string")u=new Array(l);else throw new Error(`Unknown data type ${r}`);return u}function s7(r,l){for(let u=0;ul+=u.length),l}function au(r){return typeof r=="string"||r instanceof String}function i7(r){return typeof r=="boolean"}function r7(r){return typeof r=="number"}function cu(r){return Array.isArray(r)?cu(r[0]):r instanceof Float32Array?"float32":r instanceof Int32Array||r instanceof Uint8Array?"int32":r7(r)?"float32":au(r)?"string":i7(r)?"bool":"float32"}function _S(r){return!!(r&&r.constructor&&r.call&&r.apply)}function lu(r){const l=r.length;if(l<2)return[];const u=new Array(l-1);u[l-2]=r[l-1];for(let p=l-3;p>=0;--p)u[p]=u[p+1]*r[p+1];return u}function bf(r,l){if(l==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(r)&&(r=Ic(r)),Cs().getBool("DEBUG")&&s7(r,l),o7(r,l))return r;if(l==null||l==="float32"||l==="complex64")return new Float32Array(r);if(l==="int32")return new Int32Array(r);if(l==="bool"){const u=new Uint8Array(r.length);for(let p=0;pS*x);for(let S=0;Sp*g);if(u===0)return[];if(u!==l.length)throw new Error(`[${r}] does not match the input size ${l.length}.`);return hR(0,r,l)}function o7(r,l){return r instanceof Float32Array&&l==="float32"||r instanceof Int32Array&&l==="int32"||r instanceof Uint8Array&&l==="bool"}function wf(r,l){const u=Ko(r,l);for(let p=0;p{Z(Number.isInteger(l)&&l>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${r}].`)})}function uR(r,l="utf-8"){return l=l||"utf-8",Cs().platform.encode(r,l)}function US(r,l="utf-8"){return l=l||"utf-8",Cs().platform.decode(r,l)}class dR{constructor(r,l){this.backendTimer=r,this.logger=l,l==null&&(this.logger=new c7)}profileKernel(r,l,u){let p;const g=()=>{p=u()},f=this.backendTimer.time(g);for(let S=0;S{a7(v,x.dtype,r)})}const I={kernelName:r,outputs:p,inputs:l,timeMs:f.then(S=>S.kernelMs),extraInfo:f.then(S=>S.getExtraProfileInfo!=null?S.getExtraProfileInfo():"")};return I}logKernelProfile(r){const{kernelName:l,outputs:u,timeMs:p,inputs:g,extraInfo:f}=r;u.forEach(I=>{Promise.all([I.data(),p,f]).then(S=>{this.logger.logKernelProfile(l,I,S[0],S[1],g,S[2])})})}}function a7(r,l,u){if(l!=="float32")return!1;for(let p=0;p0?ne:""} `}}console.log(`%c${S} %c${I} %c${x}D ${O} %c${v} %c${C} %c${f}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function pR(r,l,u){const p={},g={};for(let x=0;xp[te.id]=!0),G=!0,g[v.id]=!0;break}if(G)break}}const f={};f[u.id]=!0;const I={};for(let x=r.length-1;x>=0;x--){const v=r[x],O=v.inputs;for(let C=0;C=0;g--){const f=l[g],I=[];if(f.outputs.forEach(x=>{const v=r[x.id];v!=null?I.push(v):I.push(null)}),f.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${f.kernelName}.`);const S=f.gradient(I);for(const x 
in f.inputs){if(!(x in S))throw new Error(`Cannot backprop through input ${x}. Available gradients found: ${Object.keys(S)}.`);const v=u(()=>S[x]());if(v.dtype!=="float32")throw new Error(`Error in gradient for op ${f.kernelName}. The gradient of input ${x} must have 'float32' dtype, but has '${v.dtype}'`);const O=f.inputs[x];if(!jo(v.shape,O.shape))throw new Error(`Error in gradient for op ${f.kernelName}. The gradient of input '${x}' has shape '${v.shape}', which does not match the shape of the input '${O.shape}'`);if(r[O.id]==null)r[O.id]=v;else{const C=r[O.id];r[O.id]=p(C,v),C.dispose()}}}}const fR=20,hu=3,BS=7;function gR(r,l,u,p){const g=lu(l),f=l7(r,l,u,g),I=l.length,S=Sf(r,l,u,g,f),x=["Tensor"];return p&&(x.push(` dtype: ${u}`),x.push(` rank: ${I}`),x.push(` shape: [${l}]`),x.push(" values:")),x.push(S.map(v=>" "+v).join(` -`)),x.join(` -`)}function l7(r,l,u,p){const g=qt(l),f=p[p.length-1],I=new Array(f).fill(0),S=l.length,x=u==="complex64"?du(r):r;if(S>1)for(let v=0;vfR){const oe=hu*I;let ge=Array.from(r.slice(0,oe)),fe=Array.from(r.slice((S-hu)*I,S*I));return u==="complex64"&&(ge=du(ge),fe=du(fe)),["["+ge.map((Ae,Te)=>uu(Ae,g[Te],u)).join(", ")+", ..., "+fe.map((Ae,Te)=>uu(Ae,g[S-hu+Te],u)).join(", ")+"]"]}const te=u==="complex64"?du(r):Array.from(r);return["["+te.map((oe,ge)=>uu(oe,g[ge],u)).join(", ")+"]"]}const v=l.slice(1),O=p.slice(1),C=p[0]*I,U=[];if(S>fR){for(let te=0;te`Length of values '${p}' does not match the size inferred by the shape '${this.size}'.`)}if(l==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=u||oR(l,this.size),this.strides=lu(r)}set(r,...l){l.length===0&&(l=[0]),Z(l.length===this.rank,()=>`The number of provided coordinates (${l.length}) must match the rank (${this.rank})`);const u=this.locToIndex(l);this.values[u]=r}get(...r){r.length===0&&(r=[0]);let l=0;for(const p of r){if(p<0||p>=this.shape[l]){const g=`Requested out of range element at ${r}. Buffer shape=${this.shape}`;throw new Error(g)}l++}let u=r[r.length-1];for(let p=0;pUS(u))}catch(u){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return r}dataSync(){this.throwIfDisposed();const r=Mi().readSync(this.dataId);if(this.dtype==="string")try{return r.map(l=>US(l))}catch(l){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return r}async bytes(){this.throwIfDisposed();const r=await Mi().read(this.dataId);return this.dtype==="string"?r:new Uint8Array(r.buffer)}dispose(){if(this.isDisposed)return;Mi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(r=!1){return Tc.print(this,r)}clone(){return this.throwIfDisposed(),Tc.clone(this)}toString(r=!1){const l=this.dataSync();return gR(l,this.shape,this.dtype,r)}cast(r){return this.throwIfDisposed(),Tc.cast(this,r)}variable(r=!0,l,u){return this.throwIfDisposed(),Mi().makeVariable(this,r,l,u)}}Object.defineProperty(In,Symbol.hasInstance,{value:r=>!!r&&r.data!=null&&r.dataSync!=null&&r.throwIfDisposed!=null});class If extends In{constructor(r,l,u,p){super(r.shape,r.dtype,r.dataId,p);this.trainable=l,this.name=u}assign(r){if(r.dtype!==this.dtype)throw new Error(`dtype of the new value (${r.dtype}) and previous value (${this.dtype}) must match`);if(!jo(r.shape,this.shape))throw new Error(`shape of the new value (${r.shape}) and previous value (${this.shape}) must match`);Mi().disposeTensor(this),this.dataId=r.dataId,Mi().incRef(this,null)}dispose(){Mi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(If,Symbol.hasInstance,{value:r=>r instanceof In&&r.assign!=null&&r.assign instanceof Function});var IR;(function(r){r.R0="R0",r.R1="R1",r.R2="R2",r.R3="R3",r.R4="R4",r.R5="R5",r.R6="R6"})(IR||(IR={}));var MS;(function(r){r.float32="float32",r.int32="int32",r.bool="int32",r.complex64="complex64"})(MS||(MS={}));var PS;(function(r){r.float32="float32",r.int32="int32",r.bool="bool",r.complex64="complex64"})(PS||(PS={}));var zS;(function(r){r.float32="float32",r.int32="float32",r.bool="float32",r.complex64="complex64"})(zS||(zS={}));var GS;(function(r){r.float32="complex64",r.int32="complex64",r.bool="complex64",r.complex64="complex64"})(GS||(GS={}));const u7={float32:zS,int32:MS,bool:PS,complex64:GS};function xR(r,l){if(r==="string"||l==="string"){if(r==="string"&&l==="string")return"string";throw new Error(`Can not upcast ${r} with ${l}`)}return u7[r][l]}function mt(r,l){if(r.dtype===l.dtype)return[r,l];const u=xR(r.dtype,l.dtype);return[r.cast(u),l.cast(u)]}function xf(r){const l=[],u=new Set;return TR(r,l,u),l}function TR(r,l,u){if(r==null)return;if(r instanceof In){l.push(r);return}if(!d7(r))return;const p=r;for(const g in p){const f=p[g];u.has(f)||(u.add(f),TR(f,l,u))}}function d7(r){return Array.isArray(r)||typeof r=="object"}class AR{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const r in this.registeredVariables)this.registeredVariables[r].dispose()}}class pu{constructor(r){this.ENV=r,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new AR}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const r=this.getSortedBackends();for(let l=0;l{l.setupFunc!=null&&l.setupFunc(this.backendInstance)})}disposeRegisteredKernels(r){const l=FS(r);l.forEach(u=>{u.disposeFunc!=null&&u.disposeFunc(this.registry[r])})}initializeBackend(r){const 
l=this.registryFactory[r];if(l==null)throw new Error(`Cannot initialize backend ${r}, no registration found.`);try{const u=l.factory();if(u&&!(u instanceof CC)&&typeof u.then=="function"){const p=++this.pendingBackendInitId,g=u.then(f=>p(pthis.registryFactory[l].priority-this.registryFactory[r].priority)}initializeBackendsAndReturnBest(){const r=this.getSortedBackends();for(let l=0;lthis.startScope(u),()=>this.endScope(p),()=>(p=l(),p instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),p))}scopedRun(r,l,u){r();try{const p=u();return l(),p}catch(p){throw l(),p}}nextTensorId(){return pu.nextTensorId++}nextVariableId(){return pu.nextVariableId++}clone(r){const l=this.makeTensorFromDataId(r.dataId,r.shape,r.dtype),u={x:r},p=f=>({x:()=>{const I="float32",S={x:f},x={dtype:I};return Y.runKernelFunc(v=>v.cast(f,I),S,null,Lc,x)}}),g=[];return this.addTapeNode(this.state.activeScope.name,u,[l],p,g,{}),l}runKernel(r,l,u,p,g){const f=null,I=null;return this.runKernelFunc(f,l,I,r,u,p,g)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(r,l,u){const p=this.backend.numDataIds();let g=0;u.forEach(S=>{g+=S.dtype==="complex64"?3:1});const f=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],I=p-l-g-f;if(I>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${I} data ids) after running '${r}'`)}runKernelFunc(r,l,u,p,g,f,I){let S,x=[];const v=this.isTapeOn();p==null&&(p=this.state.activeScope!=null?this.state.activeScope.name:"");const O=this.state.numBytes,C=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let U;const G=yf(p,this.backendName);let ne;if(G!=null)U=()=>{const oe=this.backend.numDataIds();ne=G.kernelFunc({inputs:l,attrs:g,backend:this.backend});const ge=Array.isArray(ne)?ne:[ne];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,oe,ge);const fe=ge.map(({dataId:Ae,shape:Te,dtype:Ve})=>this.makeTensorFromDataId(Ae,Te,Ve));if(v){let Ae=this.getTensorsForGradient(p,l,fe);if(Ae==null){I==null&&(I=[]);const Te=fe.filter((Ve,rt)=>I[rt]);Ae=(f||[]).slice().concat(Te)}x=this.saveTensorsForBackwardMode(Ae)}return fe};else{const oe=ge=>{if(!v)return;x=ge.map(fe=>this.keep(this.clone(fe)))};U=()=>{const ge=this.backend.numDataIds();ne=this.tidy(()=>r(this.backend,oe));const fe=Array.isArray(ne)?ne:[ne];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,ge,fe),fe}}let te;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?S=U():(te=this.profiler.profileKernel(p,l,()=>U()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(te),S=te.outputs)}),v&&this.addTapeNode(p,l,S,u,x,g),this.state.profiling&&this.state.activeProfile.kernels.push({name:p,bytesAdded:this.state.numBytes-O,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-C,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(l).map(oe=>l[oe]!=null?l[oe].shape:null),outputShapes:S.map(oe=>oe.shape),kernelTimeMs:te.timeMs,extraInfo:te.extraInfo}),Array.isArray(ne)?S:S[0]}saveTensorsForBackwardMode(r){const l=r.map(u=>this.keep(this.clone(u)));return l}getTensorsForGradient(r,l,u){const p=kS(r);if(p!=null){const g=p.inputsToSave||[],f=p.outputsToSave||[];let I;p.saveAllInputs?(Z(Array.isArray(l),()=>"saveAllInputs is true, expected inputs to be an array."),I=Object.keys(l).map(x=>l[x])):I=g.map(x=>l[x]);const S=u.filter((x,v)=>f[v]);return I.concat(S)}return 
null}makeTensor(r,l,u,p){if(r==null)throw new Error("Values passed to engine.makeTensor() are null");u=u||"float32",p=p||this.backend;let g=r;u==="string"&&au(r[0])&&(g=r.map(S=>uR(S)));const f=p.write(g,l,u),I=new In(l,u,f,this.nextTensorId());if(this.incRef(I,p),u==="string"){const S=this.state.tensorInfo.get(f),x=lR(g);this.state.numBytes+=x-S.bytes,S.bytes=x}return I}makeTensorFromDataId(r,l,u,p){u=u||"float32";const g=new In(l,u,r,this.nextTensorId());return this.incRef(g,p),g}makeVariable(r,l=!0,u,p){u=u||this.nextVariableId().toString(),p!=null&&p!==r.dtype&&(r=r.cast(p));const g=new If(r,l,u,this.nextTensorId());if(this.state.registeredVariables[g.name]!=null)throw new Error(`Variable with name ${g.name} was already registered`);return this.state.registeredVariables[g.name]=g,this.incRef(g,this.backend),g}incRef(r,l){const u=this.state.tensorInfo.has(r.dataId)?this.state.tensorInfo.get(r.dataId).refCount:0;if(this.state.numTensors++,r.dtype==="string"&&this.state.numStringTensors++,u===0){this.state.numDataBuffers++;let p=0;r.dtype!=="complex64"&&r.dtype!=="string"&&(p=r.size*cR(r.dtype)),this.state.tensorInfo.set(r.dataId,{backend:l||this.backend,dtype:r.dtype,shape:r.shape,bytes:p,refCount:0}),this.state.numBytes+=p}this.state.tensorInfo.get(r.dataId).refCount++,r instanceof If||this.track(r)}disposeTensor(r){if(!this.state.tensorInfo.has(r.dataId))return;this.state.numTensors--,r.dtype==="string"&&this.state.numStringTensors--;const l=this.state.tensorInfo.get(r.dataId),u=l.refCount;u<=1?(r.dtype!=="complex64"&&(this.state.numBytes-=l.bytes),this.state.numDataBuffers--,l.backend.disposeData(r.dataId),this.state.tensorInfo.delete(r.dataId)):this.state.tensorInfo.get(r.dataId).refCount--}disposeVariables(){for(const r in this.state.registeredVariables){const l=this.state.registeredVariables[r];this.disposeVariable(l)}}disposeVariable(r){this.disposeTensor(r),this.state.registeredVariables[r.name]!=null&&delete this.state.registeredVariables[r.name]}memory(){const r=this.backend.memory();return r.numTensors=this.state.numTensors,r.numDataBuffers=this.state.numDataBuffers,r.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(r.unreliable=!0,r.reasons==null&&(r.reasons=[]),r.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),r}async profile(r){this.state.profiling=!0;const l=this.state.numBytes,u=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await r(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(p=>p.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-l,this.state.activeProfile.newTensors=this.state.numTensors-u;for(const p of this.state.activeProfile.kernels)p.kernelTimeMs=await p.kernelTimeMs,p.extraInfo=await p.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(r,l,u,p,g,f){const I={id:this.state.nextTapeNodeId++,kernelName:r,inputs:l,outputs:u,saved:g},S=kS(r);S!=null&&(p=S.gradFunc),p!=null&&(I.gradient=x=>(x=x.map((v,O)=>{if(v==null){const C=u[O],U=Ko(C.size,C.dtype);return this.makeTensor(U,C.shape,C.dtype)}return v}),p(x.length>1?x:x[0],g,f))),this.state.activeTape.push(I)}keep(r){return r.kept=!0,r}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(r){const l={track:[],name:"unnamed 
scope",id:this.state.nextScopeId++};r&&(l.name=r),this.state.scopeStack.push(l),this.state.activeScope=l}endScope(r){const l=xf(r),u=new Set(l.map(g=>g.id));for(let g=0;g{!g.kept&&g.scopeId===p.id&&this.track(g)})}gradients(r,l,u,p=!1){if(Z(l.length>0,()=>"gradients() received an empty list of xs."),u!=null&&u.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${u.dtype}'`);const g=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",r));Z(g instanceof In,()=>"The result y returned by f() must be a tensor.");const f=pR(this.state.activeTape,l,g);if(!p&&f.length===0&&l.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const I={};I[g.id]=u==null?p7(g.shape):u,mR(I,f,x=>this.tidy(x),m7);const S=l.map(x=>I[x.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(x=>{for(const v of x.saved)v.dispose()}),this.state.activeTape=null),{value:g,grads:S}})}customGrad(r){return Z(_S(r),()=>"The f passed in customGrad(f) must be a function."),(...l)=>{Z(l.every(g=>g instanceof In),()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors");let u;const p={};return l.forEach((g,f)=>{p[f]=g}),this.runKernelFunc((g,f)=>(u=r(...l,f),Z(u.value instanceof In,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),Z(_S(u.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),u.value),p,(g,f)=>{const I=u.gradFunc(g,f),S=Array.isArray(I)?I:[I];Z(S.length===l.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),Z(S.every(v=>v instanceof In),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const x={};return S.forEach((v,O)=>{x[O]=()=>v}),x})}}readSync(r){const l=this.state.tensorInfo.get(r);return l.backend.readSync(r)}read(r){const l=this.state.tensorInfo.get(r);return l.backend.read(r)}async time(r){const l=$S(),u=await this.backend.time(r);return u.wallMs=$S()-l,u}track(r){return this.state.activeScope!=null&&(r.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(r)),r}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new AR;for(const r in this.registry)this.disposeRegisteredKernels(r),this.registry[r].dispose(),delete this.registry[r];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}pu.nextTensorId=0;pu.nextVariableId=0;function p7(r){const l=wf(qt(r),"float32");return Y.makeTensor(l,r,"float32")}function VS(){const r=RS();if(r._tfengine==null){const l=new OC(r);r._tfengine=new pu(l)}return DC(r._tfengine.ENV),wR(()=>r._tfengine),r._tfengine}const Y=VS();function m7(r,l){const u={a:r,b:l};return Y.runKernelFunc((p,g)=>{const f=p.add(r,l);return g([r,l]),f},u,null,wc)}function vR(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}const hr=Cs();hr.registerFlag("DEBUG",()=>!1,r=>{r&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});hr.registerFlag("IS_BROWSER",()=>vR());hr.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");hr.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));hr.registerFlag("PROD",()=>!1);hr.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>hr.getBool("DEBUG"));hr.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);hr.registerFlag("IS_TEST",()=>!1);function ur(r,l){let u=r;if(Rs(r))return l==="string"?[]:[r.length];if(!Array.isArray(r))return[];const p=[];for(;Array.isArray(u)||Rs(u)&&l!=="string";)p.push(u.length),u=u[0];return Array.isArray(r)&&Cs().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&NR(r,p,[]),p}function NR(r,l,u){if(u=u||[],!Array.isArray(r)&&!Rs(r)){Z(l.length===0,()=>`Element arr[${u.join("][")}] is a primitive, but should be an array/TypedArray of ${l[0]} elements`);return}Z(l.length>0,()=>`Element arr[${u.join("][")}] should be a primitive, but is an array of ${r.length} elements`),Z(r.length===l[0],()=>`Element arr[${u.join("][")}] should have ${l[0]} elements, but has ${r.length} elements`);const p=l.slice(1);for(let g=0;g=0&&(g=p),CR(p,g,l,u),r==null||!Rs(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string"){const x=r==null?"null":r.constructor.name;throw new Error(`Argument '${l}' passed to '${u}' must be a Tensor or TensorLike, but got '${x}'`)}const f=ur(r,g);!Rs(r)&&!Array.isArray(r)&&(r=[r]);const I=!0,S=g!=="string"?bf(r,g):Ic(r,[],I);return Y.makeTensor(S,f,g)}function Tf(r,l,u,p="numeric"){if(!Array.isArray(r))throw new Error(`Argument ${l} passed to ${u} must be a \`Tensor[]\` or \`TensorLike[]\``);const g=r;return g.map((f,I)=>M(f,`${l}[${I}]`,u),p)}const RR="__op";function z(r){const l=Object.keys(r);if(l.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${l.length} keys.`);let u=l[0];const p=r[u];u.endsWith("_")&&(u=u.substring(0,u.length-1)),u=u+RR;const g=(...f)=>{Y.startScope(u);try{const I=p(...f);return I instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),Y.endScope(I),I}catch(I){throw Y.endScope(null),I}};return Object.defineProperty(g,"name",{value:u,configurable:!0}),g}function f7(r,l){const u=M(r,"real","complex"),p=M(l,"imag","complex");Zt(u.shape,p.shape,`real and imag shapes, ${u.shape} and ${p.shape}, must match in call to tf.complex().`);const g=I=>I.complex(u,p),f={real:u,imag:p};return Y.runKernelFunc(g,f,null,KC)}const Pi=z({complex_:f7});function zi(r,l,u,p){if(p==null&&(p=cu(r)),p==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Rs(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(l!=null){Lf(l);const g=qt(l),f=qt(u);Z(g===f,()=>`Based on the provided shape, [${l}], the tensor should have ${g} values but has ${f}`);for(let I=0;I`Error creating a new Tensor. Inferred shape (${u}) does not match the provided shape (${l}). 
`)}}return!Rs(r)&&!Array.isArray(r)&&(r=[r]),l=l||u,r=p!=="string"?bf(r,p):Ic(r,[],!0),Y.makeTensor(r,l,p)}function YS(r,l,u){const p=ur(r,u);return zi(r,l,p,u)}function mu(r,l="float32",u){return l=l||"float32",Lf(r),new bR(r,l,u)}function g7(r,l){const u=M(r,"x","cast");if(!aR(l))throw new Error(`Failed to cast to unknown dtype ${l}`);if(l==="string"&&u.dtype!=="string"||l!=="string"&&u.dtype==="string")throw new Error("Only strings can be casted to strings");const p={x:u},g={dtype:l};return Y.runKernelFunc(f=>f.cast(u,l),p,null,Lc,g)}const Le=z({cast_:g7});function y7(r){const l=M(r,"x","clone",null),u=()=>Y.makeTensorFromDataId(l.dataId,l.shape,l.dtype),p={x:l};return Y.runKernelFunc(u,p,null,Wm)}const pi=z({clone_:y7});function HS(r,l=!1){console.log(r.toString(l))}VS();const b7={buffer:mu,cast:Le,clone:pi,print:HS};LR(b7);function w7(r,l){const u=M(r,"x","reshape",null),p={x:u},g={shape:l},f=(I,S)=>(l=iR(l,u.size),Z(u.size===qt(l),()=>"new shape and old shape must have the same number of elements."),S([u]),I.reshape(u,l));return Y.runKernelFunc(f,p,null,jm,g)}const ie=z({reshape_:w7});function L7(r,l,u=!1,p=!1){let g=M(r,"a","matMul"),f=M(l,"b","matMul");[g,f]=mt(g,f),Z(g.rank>=2&&f.rank>=2&&g.rank===f.rank,()=>`Error in matMul: inputs must have the same rank of at least 2, got ranks ${g.rank} and ${f.rank}.`);const I=u?g.shape[g.rank-2]:g.shape[g.rank-1],S=p?f.shape[f.rank-1]:f.shape[f.rank-2],x=u?g.shape[g.rank-1]:g.shape[g.rank-2],v=p?f.shape[f.rank-2]:f.shape[f.rank-1],O=g.shape.slice(0,-2),C=f.shape.slice(0,-2),U=qt(O),G=qt(C);Z(jo(O,C),()=>`Error in matMul: outer dimensions (${O}) and (${C}) of Tensors with shapes ${g.shape} and ${f.shape} must match.`),Z(I===S,()=>`Error in matMul: inner shapes (${I}) and (${S}) of Tensors with shapes ${g.shape} and ${f.shape} and transposeA=${u} and transposeB=${p} must match.`);const ne=g.shape.slice(0,-2).concat([x,v]),te=u?ie(g,[U,I,x]):ie(g,[U,x,I]),oe=p?ie(f,[G,v,S]):ie(f,[G,S,v]),ge=(Ve,rt)=>(rt([te,oe]),Ve.batchMatMul(te,oe,u,p)),fe={a:te,b:oe},Ae={transposeA:u,transposeB:p},Te=Y.runKernelFunc(ge,fe,null,Sm,Ae);return ie(Te,ne)}const dn=z({matMul_:L7});function S7(r,l){const u=M(r,"x","transpose");if(l==null&&(l=u.shape.map((f,I)=>I).reverse()),Z(u.rank===l.length,()=>`Error in transpose: rank of input ${u.rank} must match length of perm ${l}.`),l.forEach(f=>{Z(f>=0&&f`All entries in 'perm' must be between 0 and ${u.rank-1} but got ${l}`)}),u.rank<=1)return u.clone();const p={x:u},g={perm:l};return Y.runKernelFunc(f=>f.transpose(u,l),p,null,df,g)}const Wt=z({transpose_:S7});function qS(r,l,u){if(Sc(r),l!=null&&l.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const p=ur(r,u);if(p.length!==3&&p.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return zi(r,l,p,u)}const jS={};hm(jS,{fromPixels:()=>T7,toPixels:()=>x7});let Ac;function I7(r,l=3){if(l>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(r==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let u=!1,p=!1,g=!1,f=!1,I=!1;if(r.data instanceof Uint8Array)u=!0;else if(typeof ImageData!="undefined"&&r instanceof ImageData)p=!0;else if(typeof HTMLVideoElement!="undefined"&&r instanceof HTMLVideoElement)g=!0;else if(typeof HTMLImageElement!="undefined"&&r instanceof HTMLImageElement)f=!0;else if(r.getContext!=null)I=!0;else throw new 
Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`);if(g){const G=2;if(g&&r.readyState element.")}const S=yf(ES,Y.backendName);if(S!=null){const G={pixels:r},ne={numChannels:l};return Y.runKernel(ES,G,ne)}const[x,v]=g?[r.videoWidth,r.videoHeight]:[r.width,r.height];let O;I?O=r.getContext("2d").getImageData(0,0,x,v).data:p||u?O=r.data:(f||g)&&(Ac==null&&(Ac=document.createElement("canvas").getContext("2d")),Ac.canvas.width=x,Ac.canvas.height=v,Ac.drawImage(r,0,0,x,v),O=Ac.getImageData(0,0,x,v).data);let C;if(l===4)C=new Int32Array(O);else{const G=x*v;C=new Int32Array(G*l);for(let ne=0;ne4||f===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${f}`);if(u.dtype!=="float32"&&u.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${u.dtype}. Please use float32 or int32 tensors.`);const I=await u.data(),S=u.dtype==="float32"?255:1,x=new Uint8ClampedArray(g*p*4);for(let v=0;v1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${G}.`)}else if(u.dtype==="int32"&&(G<0||G>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${G}.`);f===1?(O[0]=G*S,O[1]=G*S,O[2]=G*S):O[U]=G*S}const C=v*4;x[C+0]=Math.round(O[0]),x[C+1]=Math.round(O[1]),x[C+2]=Math.round(O[2]),x[C+3]=Math.round(O[3])}if(l!=null){l.width=g,l.height=p;const v=l.getContext("2d"),O=new ImageData(x,g,p);v.putImageData(O,0,0)}return u!==r&&u.dispose(),x}const T7=z({fromPixels_:I7});function OR(r,l,u){const p=r.shape.length;Z(p===l.length,()=>`Error in slice${p}D: Length of begin ${l} must match the rank of the array (${p}).`),Z(p===u.length,()=>`Error in slice${p}D: Length of size ${u} must match the rank of the array (${p}).`);for(let g=0;g`Error in slice${p}D: begin[${g}] + size[${g}] (${l[g]+u[g]}) would overflow input.shape[${g}] (${r.shape[g]})`)}function Af(r,l,u){let p;const g=r.shape.length;typeof l=="number"?p=[l,...new Array(g-1).fill(0)]:l.length{Z(I!==-1,()=>"slice() does not support negative begin indexing.")});let f;return u==null?f=new Array(g).fill(-1):typeof u=="number"?f=[u,...new Array(g-1).fill(-1)]:u.lengthI>=0?I:(Z(I===-1,()=>`Negative size values should be exactly -1 but got ${I} for the slice() size at index ${S}.`),r.shape[S]-p[S])),[p,f]}function A7(r){Cs().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(r+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}SR(A7);function ER(r,l){return Y.tidy(r,l)}function DR(r){const l=xf(r);l.forEach(u=>u.dispose())}function v7(r,l){let u=M(r,"a","add"),p=M(l,"b","add");[u,p]=mt(u,p);const g=(I,S)=>{const x=I.add(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,wc)}const Lt=z({add_:v7});function N7(r,l){let u=M(r,"a","floorDiv"),p=M(l,"b","floorDiv");[u,p]=mt(u,p);const g=(I,S)=>{const x=I.floorDiv(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,km)}const KS=z({floorDiv_:N7});function C7(r,l){let u=M(r,"a","div"),p=M(l,"b","div");if([u,p]=mt(u,p),u.dtype==="int32"&&p.dtype==="int32")return KS(u,p);const g=(S,x)=>{const v=S.realDivide(u,p);return x([u,p]),v},f={a:u,b:p},I={};return Y.runKernelFunc(g,f,null,Om,I)}const Pe=z({div_:C7});function R7(r,l){let u=M(r,"a","mul"),p=M(l,"b","mul");[u,p]=mt(u,p);const g=(I,S)=>{const x=I.multiply(u,p);return 
S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,Gm)}const ae=z({mul_:R7});function O7(r){const l=M(r,"x","abs"),u={x:l};return Y.runKernelFunc((p,g)=>(g([l]),l.dtype==="complex64"?p.complexAbs(l):p.abs(l)),u,null,Lm)}const Un=z({abs_:O7});function E7(r,l){for(let u=0;ur[f]);return[u,g]}function Zn(r,l){const u=l.map(p=>1);return D7(r,u,l)}function ds(r,l){if(E7(r,l))return null;const u=[];for(let p=0;pu.push(p)),u}function vc(r){return r.map((l,u)=>[u,l]).sort((l,u)=>l[1]-u[1]).map(l=>l[0])}function Kr(r,l){const u=[];for(let p=l-r;p`The output # of rows (${S}) must be an integer. Change the stride and/or zero pad parameters`);const x=Xo((I-l+2*p)/u+1,g);return Z(Qt(x),()=>`The output # of columns (${x}) must be an integer. Change the stride and/or zero pad parameters`),[S,x]}function W7(r,l,u,p,g,f){g==null&&(g=FR(r,l,p));const I=r[0],S=r[1],x=r[2],v=Xo((I-l+2*g)/p+1,f);Z(Qt(v),()=>`The output # of depths (${v}) must be an integer. Change the stride and/or zero pad parameters`);const O=Xo((S-l+2*g)/p+1,f);Z(Qt(O),()=>`The output # of rows (${O}) must be an integer. Change the stride and/or zero pad parameters`);const C=Xo((x-l+2*g)/p+1,f);return Z(Qt(C),()=>`The output # of columns (${C}) must be an integer. Change the stride and/or zero pad parameters`),[v,O,C,u]}function FR(r,l,u,p=1){const g=Nc(l,p);return Math.floor((r[0]*(u-1)-u+g)/2)}function vf(r){return typeof r=="number"?[r,r,r]:r.length===2?[r[0],r[1],1]:r}function XS(r){return typeof r=="number"?[r,r,r]:r}function Nc(r,l){return l<=1?r:r+(r-1)*(l-1)}function k7(r,l,u,p,g,f,I,S,x){let v,O,C;if(typeof r=="number"){const U=r===0?"VALID":"NUMBER";v={top:r,bottom:r,left:r,right:r,type:U};const G=_7([l,u],f,p,r,S);O=G[0],C=G[1]}else if(r==="same"){O=Math.ceil(l/p),C=Math.ceil(u/g);const U=Math.max(0,(O-1)*p+f-l),G=Math.max(0,(C-1)*g+I-u),ne=Math.floor(U/2),te=U-ne,oe=Math.floor(G/2),ge=G-oe;v={top:ne,bottom:te,left:oe,right:ge,type:"SAME"}}else if(r==="valid")v={top:0,bottom:0,left:0,right:0,type:"VALID"},O=Math.ceil((l-f+1)/p),C=Math.ceil((u-I+1)/g);else if(typeof r=="object"){const U=x==="channelsLast"?r[1][0]:r[2][0],G=x==="channelsLast"?r[1][1]:r[2][1],ne=x==="channelsLast"?r[2][0]:r[3][0],te=x==="channelsLast"?r[2][1]:r[3][1],oe=U===0&&G===0&&ne===0&&te===0?"VALID":"EXPLICIT";v={top:U,bottom:G,left:ne,right:te,type:oe},O=Xo((l-f+U+G)/p+1,S),C=Xo((u-I+ne+te)/g+1,S)}else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:v,outHeight:O,outWidth:C}}function F7(r,l,u,p,g,f,I,S,x,v,O){let C,U,G,ne;if(typeof r=="number"){const te=r===0?"VALID":"NUMBER";C={top:r,bottom:r,left:r,right:r,front:r,back:r,type:te};const oe=W7([l,u,p,1],S,1,g,r,O);U=oe[0],G=oe[1],ne=oe[2]}else if(r==="same"){U=Math.ceil(l/g),G=Math.ceil(u/f),ne=Math.ceil(p/I);const te=(U-1)*g+S-l,oe=(G-1)*f+x-u,ge=(ne-1)*I+v-p,fe=Math.floor(te/2),Ae=te-fe,Te=Math.floor(oe/2),Ve=oe-Te,rt=Math.floor(ge/2),vt=ge-rt;C={top:Te,bottom:Ve,left:rt,right:vt,front:fe,back:Ae,type:"SAME"}}else if(r==="valid")C={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},U=Math.ceil((l-S+1)/g),G=Math.ceil((u-x+1)/f),ne=Math.ceil((p-v+1)/I);else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:C,outDepth:U,outHeight:G,outWidth:ne}}function Xo(r,l){if(!l)return r;switch(l){case"round":return Math.round(r);case"ceil":return Math.ceil(r);case"floor":return Math.floor(r);default:throw new Error(`Unknown roundingMode ${l}`)}}function Xr(r){const[l,u,p]=vf(r);return l===1&&u===1&&p===1}function Jr(r,l){return Xr(r)||Xr(l)}function 
Cc(r){if(r==="NHWC")return"channelsLast";if(r==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${r}`)}function _R(r,l){const u=r[0].length;r.forEach((g,f)=>{Z(g.length===u,()=>`Error in concat${u}D: rank of tensors[${f}] must be the same as the rank of the rest (${u})`)}),Z(l>=0&&l`Error in concat${u}D: axis must be between 0 and ${u-1}.`);const p=r[0];r.forEach((g,f)=>{for(let I=0;I`Error in concat${u}D: Shape of tensors[${f}] (${g}) does not match the shape of the rest (${p}) along the non-concatenated axis ${f}.`)})}function WR(r,l){const u=r[0].slice();for(let p=1;p=1,()=>"Pass at least one tensor to concat");let u=Tf(r,"tensors","concat");u[0].dtype==="complex64"&&u.forEach(I=>{if(I.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor - with dtype ${I.dtype}. `)});const p=(I,S)=>{const x=ht(l,u[0].shape)[0],v=WR(u.map(U=>U.shape),x);if(qt(v)===0)return YS([],v);if(u=u.filter(U=>U.size>0),u.length===1)return u[0];const O=u.map(U=>U.shape);_R(O,x);const C=I.concat(u,x);return S(u),C},g=u,f={axis:l};return Y.runKernelFunc(p,g,null,Tm,f)}const yn=z({concat_:$7});function U7(r){const l=M(r,"x","sigmoid"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.sigmoid(l);return g([f]),f},u,null,sf)}const JS=z({sigmoid_:U7});function B7(r,l,u){const p=M(r,"x","slice");if(p.rank===0)throw new Error("Slicing scalar is not possible");const g=(S,x)=>{const[v,O]=Af(p,l,u);return OR(p,v,O),x([p]),S.slice(p,v,O)},f={x:p},I={begin:l,size:u};return Y.runKernelFunc(g,f,null,ef,I)}const Tt=z({slice_:B7});function M7(r,l,u){const p=M(r,"x","batchToSpaceND"),g=l.reduce((x,v)=>x*v);Z(p.rank>=1+l.length,()=>`input rank is ${p.rank} but should be > than blockShape.length ${l.length}`),Z(u.length===l.length,()=>`crops.length is ${u.length} but should be equal to blockShape.length ${l.length}`),Z(p.shape[0]%g===0,()=>`input tensor batch is ${p.shape[0]} but is not divisible by the product of the elements of blockShape ${l.join(" * ")} === ${g}`);const f=x=>x.batchToSpaceND(p,l,u),I={x:p},S={blockShape:l,crops:u};return Y.runKernelFunc(f,I,null,Im,S)}const ZS=z({batchToSpaceND_:M7});function P7(r,l){let u=M(r,"broadcastTo","x");const p=u.shape;if(l.some(O=>!(O>0)||O%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${l}].`);if(l.lengthu.rank){const O=u.shape.slice();for(;O.length=0;O--)if(g[O]===l[O])f[O]=1;else if(u.shape[O]!==1)throw new Error(`broadcastTo(): [${p}] cannot be broadcast to [${l}].`);const I=f.map((O,C)=>O>1?C:-1).filter(O=>O>=0);if(I.length===0)return pi(u);const S=O=>O.tile(u,f),x={x:u},v={shape:l,inputShape:g};return Y.runKernelFunc(S,x,null,xm,v)}const Rf=z({broadcastTo_:P7});function z7(r,l,u,p,g="NHWC",f=[1,1],I){const S=M(r,"x","conv2d"),x=M(l,"filter","conv2d");let v=S,O=!1;S.rank===3&&(O=!0,v=ie(S,[1,S.shape[0],S.shape[1],S.shape[2]])),Z(v.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${v.rank}.`),Z(x.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${x.rank}.`),I!=null&&Z(Qt(p),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${p}.`);const C=g==="NHWC"?v.shape[3]:v.shape[1];Z(C===x.shape[2],()=>`Error in conv2d: depth of input (${C}) must match input depth for filter ${x.shape[2]}.`),Z(Jr(u,f),()=>`Error in conv2D: Either strides or dilations must be 1. 
Got strides ${u} and dilations '${f}'`);const U=(oe,ge)=>{const fe=Cc(g),Ae=dr(v.shape,x.shape,u,f,p,I,!1,fe),Te=oe.conv2d(v,x,Ae);return ge([v,x]),Te},G={x:v,filter:x},ne={strides:u,pad:p,dataFormat:g,dilations:f,dimRoundingMode:I},te=Y.runKernelFunc(U,G,null,Am,ne);return O?ie(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const QS=z({conv2d_:z7});function G7(r,l,u,p,g,f="NHWC",I){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let S=r,x=l,v=!1;l.rank===3&&(v=!0,x=ie(l,[1,l.shape[0],l.shape[1],l.shape[2]]),S=[1,r[0],r[1],r[2]]),Z(S.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${S.length}.`),Z(x.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${x.rank}`),Z(u.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${u.rank}`);const O=f==="NHWC"?S[3]:S[1],C=f==="NHWC"?x.shape[3]:x.shape[1];Z(O===u.shape[2],()=>`Error in conv2dDerInput: depth of input (${O}) must match input depth for filter ${u.shape[2]}.`),Z(C===u.shape[3],()=>`Error in conv2dDerInput: depth of output (${C}) must match output depth for filter ${u.shape[3]}.`),I!=null&&Z(Qt(g),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const U=(oe,ge)=>{const fe=1,Ae=Cc(f),Te=dr(S,u.shape,p,fe,g,I,!1,Ae),Ve=oe.conv2dDerInput(x,u,Te);return ge([x,u]),Ve},G={dy:x,filter:u},ne={strides:p,pad:g,dataFormat:f,dimRoundingMode:I,inputShape:S},te=Y.runKernelFunc(U,G,null,vm,ne);return v?ie(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const $R=z({conv2DBackpropInput_:G7});function V7(r,l,u,p,g){Z(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let f=r,I=l,S=!1;l.rank===4&&(S=!0,I=ie(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),f=[1,r[0],r[1],r[2],r[3]]);const x=f[4],v=I.shape[4];Z(f.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${f.length}.`),Z(I.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${I.rank}`),Z(u.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${u.rank}`),Z(x===u.shape[3],()=>`Error in conv3dDerInput: depth of input (${x}) must match input depth for filter ${u.shape[3]}.`),Z(v===u.shape[4],()=>`Error in conv3dDerInput: depth of output (${v}) must match output depth for filter ${u.shape[4]}.`);const O=ne=>{const te=1,oe=fu(f,u.shape,p,te,g);return ne.conv3dDerInput(I,u,oe)},C={dy:I},U={pad:g},G=Y.runKernelFunc(O,C,null,QC,U);return S?ie(G,[G.shape[1],G.shape[2],G.shape[3],G.shape[4]]):G}const UR=z({conv3DBackpropInput_:V7});function Y7(r){const l=M(r,"x","cos"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.cos(l);return g([l]),f},u,null,Nm)}const gu=z({cos_:Y7});function H7(r){const l=M(r,"x","cosh"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.cosh(l);return g([l]),f},u,null,Cm)}const eI=z({cosh_:H7});function q7(r,l=0,u=!1,p=!1){const g=M(r,"x","cumsum"),f=(x,v)=>{const O=ds([l],g.rank);let C=g;O!=null&&(C=Wt(g,O));const U=Kr(1,g.rank)[0];let G=x.cumsum(C,U,u,p);if(v([g]),O!=null){const ne=vc(O);G=Wt(G,ne)}return G},I={x:g},S={axis:l,exclusive:u,reverse:p};return Y.runKernelFunc(f,I,null,Rm,S)}const tI=z({cumsum_:q7});function At(r,l){const u=[];for(let p=0;p1)&&u.unshift(f)}return u}function it(r,l){const u=[],p=Math.max(r.length,l.length);for(let g=0;gI.equal(u,p),f={a:u,b:p};return Y.runKernelFunc(g,f,null,h2)}const nI=z({equal_:j7});function K7(r,l,u){const 
p=M(l,"a","where"),g=M(u,"b","where"),f=M(r,"condition","where","bool"),I=it(p.shape,g.shape),S=Rf(p,I),x=Rf(g,I);f.rank===1&&Z(f.shape[0]===p.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),f.rank!==1&&Zt(f.shape,x.shape,"Error in where: ");const v=(C,U)=>{const G=C.select(f,S,x);return U([f]),G},O={condition:f,t:S,e:x};return Y.runKernelFunc(v,O,null,Qm)}const Bn=z({where_:K7});function X7(r){const l=M(r,"x","zerosLike"),u={x:l};return Y.runKernelFunc(p=>p.zerosLike(l),u,null,ff)}const qe=z({zerosLike_:X7});function J7(r){const l=M(r,"x","exp"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.exp(l);return g([f]),f},u,null,Em)}const Mn=z({exp_:J7});function Z7(r,l=0){const u=null,p=M(r,"x","expandDims",u);Z(l<=p.rank,()=>"Axis must be <= rank of the tensor");const g=p.shape.slice();return l<0&&(Z(-(p.rank+1)<=l,()=>`Axis must be in the interval [${-(p.rank+1)}, ${p.rank}]`),l=p.rank+l+1),g.splice(l,0,1),ie(p,g)}const Os=z({expandDims_:Z7});function Q7(r,l){const u=null,p=M(r,"x","tile",u);Z(p.rank===l.length,()=>`Error in transpose: rank of input ${p.rank} must match length of reps ${l}.`);const g=(x,v)=>{const O=x.tile(p,l);return v([p]),O},f=[p],I={x:p},S={reps:l};return Y.runKernelFunc(g,I,null,uf,S,f)}const Jo=z({tile_:Q7});function eJ(r,l,u,p="float32"){l==null&&(l=r);const g=mu([r,l],p),f=r<=l?r:l;for(let S=0;Sg.fill(r,l,u),{},null,p2,p)}function tJ(r){const l=M(r,"x","floor"),u={x:l};return Y.runKernelFunc(p=>p.floor(l),u,null,Dm)}const rI=z({floor_:tJ});function BR(r,l,u){const p=r.shape[u],g=[];let f=1,I=1;for(let S=0;S{const O=ht(u,p.shape)[0],C=BR(p,g,O),U=x.gather(p,ie(g,[g.size]),O);return v([p,g]),ie(U,C.outputShape)};return Y.runKernelFunc(S,f,null,Fm,I)}const oI=z({gather_:nJ});function sJ(r,l){let u=M(r,"a","greater"),p=M(l,"b","greater");[u,p]=mt(u,p),it(u.shape,p.shape);const g=I=>I.greater(u,p),f={a:u,b:p};return Y.runKernelFunc(g,f,null,g2)}const mi=z({greater_:sJ});function iJ(r,l){let u=M(r,"a","greaterEqual"),p=M(l,"b","greaterEqual");[u,p]=mt(u,p),it(u.shape,p.shape);const g=(I,S)=>{const x=I.greaterEqual(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,_m)}const pr=z({greaterEqual_:iJ});function rJ(r){const l=M(r,"input","imag"),u=g=>g.imag(l),p={input:l};return Y.runKernelFunc(u,p,null,b2)}const yu=z({imag_:rJ});function oJ(r,l){let u=M(r,"a","maximum"),p=M(l,"b","maximum");[u,p]=mt(u,p),u.dtype==="bool"&&(u=Le(u,"int32"),p=Le(p,"int32")),it(u.shape,p.shape);const g=(I,S)=>{const x=I.maximum(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,Mm)}const aI=z({maximum_:oJ});function ke(r,l){if((Rs(r)&&l!=="string"||Array.isArray(r))&&l!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(l==="string"&&Rs(r)&&!(r instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const u=[],p=[];return zi(r,u,p,l)}function aJ(r,l){let u=M(r,"a","less"),p=M(l,"b","less");[u,p]=mt(u,p),it(u.shape,p.shape);const g=I=>I.less(u,p),f={a:u,b:p};return Y.runKernelFunc(g,f,null,I2)}const cI=z({less_:aJ});function cJ(r,l){let u=M(r,"a","lessEqual"),p=M(l,"b","lessEqual");[u,p]=mt(u,p),it(u.shape,p.shape);const g=(I,S)=>{const x=I.lessEqual(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,x2)}const mr=z({lessEqual_:cJ});function lJ(r){const l=M(r,"x","log"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.log(l);return g([l]),f},u,null,$m)}const Zr=z({log_:lJ});function hJ(r){const 
l=M(r,"x","log1p"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.log1p(l);return g([l]),f},u,null,Um)}const lI=z({log1p_:hJ});function Of(r){return Y.customGrad(r)}function uJ(r){const l=M(r,"x","neg"),u={x:l};return Y.runKernelFunc(p=>p.neg(l),u,null,Vm)}const yt=z({neg_:uJ});function dJ(r,l=null,u=!1){const p=M(r,"x","max"),g=(S,x)=>{const v=ht(l,p.shape);let O=v;const C=ds(O,p.rank);let U=p;C!=null&&(U=Wt(p,C),O=Kr(O.length,U.rank));const G=S.max(U,O);C!=null&&U.dispose();let ne=G;if(u){const te=Zn(ne.shape,ht(l,p.shape));ne=ie(ne,te),G.dispose()}return x([p,ne]),ne},f={x:p},I={reductionIndices:l,keepDims:u};return Y.runKernelFunc(g,f,null,Bm,I)}const Zo=z({max_:dJ});function pJ(r,l){let u=M(r,"a","sub"),p=M(l,"b","sub");[u,p]=mt(u,p);const g=(I,S)=>{const x=I.subtract(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,hf)}const Be=z({sub_:pJ});function mJ(r,l=null,u=!1){let p=M(r,"x","sum");p.dtype==="bool"&&(p=Le(p,"int32"));const g=(S,x)=>{x([p]);const v=ht(l,p.shape),O=ds(v,p.rank);let C=v,U=p;O!=null&&(U=Wt(p,O),C=Kr(C.length,p.rank));let G=S.sum(U,C);if(u){const ne=Zn(G.shape,v);G=ie(G,ne)}return G},f={x:p},I={axis:l,keepDims:u};return Y.runKernelFunc(g,f,null,of,I)}const Fe=z({sum_:mJ});function fJ(r,l=null,u=!1){const p=M(r,"x","logSumExp"),g=ht(l,p.shape),f=Zo(p,g,!0),I=Be(p,f),S=Mn(I),x=Fe(S,g),v=Zr(x),O=Lt(ie(f,v.shape),v);if(u){const C=Zn(O.shape,g);return ie(O,C)}return O}const hI=z({logSumExp_:fJ});function gJ(r,l){const u=M(r,"a","logicalAnd","bool"),p=M(l,"b","logicalAnd","bool");it(u.shape,p.shape);const g={a:u,b:p};return Y.runKernelFunc(f=>f.logicalAnd(u,p),g,null,T2)}const Qo=z({logicalAnd_:gJ});function yJ(r){const l=M(r,"x","logicalNot","bool"),u={x:l};return Y.runKernelFunc(p=>p.logicalNot(l),u,null,A2)}const uI=z({logicalNot_:yJ});function Es(r,l="float32"){if(l==="complex64"){const p=Es(r,"float32"),g=Es(r,"float32");return Pi(p,g)}const u=Ko(qt(r),l);return Y.makeTensor(u,r,l)}function Gi(r,l="float32"){if(l==="complex64"){const p=Gi(r,"float32"),g=Es(r,"float32");return Pi(p,g)}const u=wf(qt(r),l);return Y.makeTensor(u,r,l)}function bJ(r,l=null,u=!1){const p=M(r,"x","mean"),g=ht(l,p.shape),f=kR(p.shape,g),I=f[1],S=qt(I),x=Of(v=>{const O=ke(S),C=O.dtype===v.dtype?v:Le(v,O.dtype),U=Pe(C,O),G=Fe(U,l,u),ne=te=>{const oe=v.shape.slice();g.forEach(Ae=>{oe[Ae]=1});const ge=ie(te,oe),fe=Pe(ae(ge,Gi(v.shape,"float32")),S);return fe};return{value:G,gradFunc:ne}});return x(p)}const dI=z({mean_:bJ});function wJ(r,l=null,u=!1){const p=M(r,"x","min"),g=(S,x)=>{const v=ht(l,p.shape);let O=v;const C=ds(O,p.rank);let U=p;C!=null&&(U=Wt(p,C),O=Kr(O.length,p.rank));const G=S.min(U,O);C!=null&&U.dispose();let ne=G;if(u){const te=Zn(ne.shape,v);ne=ie(G,te),G.dispose()}return x([p,ne]),ne},f={x:p},I={axis:l,keepDims:u};return Y.runKernelFunc(g,f,null,Pm,I)}const Ef=z({min_:wJ});function LJ(r,l){let u=M(r,"a","minimum"),p=M(l,"b","minimum");[u,p]=mt(u,p),u.dtype==="bool"&&(u=Le(u,"int32"),p=Le(p,"int32")),it(u.shape,p.shape);const g=(I,S)=>{const x=I.minimum(u,p);return S([u,p]),x},f={a:u,b:p};return Y.runKernelFunc(g,f,null,zm)}const pI=z({minimum_:LJ});function SJ(r){const l=M(r,"x","square"),u={},p=[l],g=[];return Y.runKernelFunc((f,I)=>(I([l]),f.square(l)),{x:l},null,"Square",u,p,g)}const ut=z({square_:SJ});function IJ(r,l){let u=M(r,"a","notEqual"),p=M(l,"b","notEqual");[u,p]=mt(u,p),it(u.shape,p.shape);const g=I=>I.notEqual(u,p),f={a:u,b:p};return Y.runKernelFunc(g,f,null,F2)}const mI=z({notEqual_:IJ});function xJ(r){const 
l=M(r,"input","real"),u=g=>g.real(l),p={input:l};return Y.runKernelFunc(u,p,null,z2)}const Rc=z({real_:xJ});function TJ(r,l,u=0){const p=M(r,"x","pad");if(p.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const g=(S,x)=>(x([p]),S.pad(p,l,u)),f={paddings:l,constantValue:u},I={x:p};return Y.runKernelFunc(g,I,null,Ym,f)}const fI=z({pad_:TJ});function AJ(r,l,u){const p=M(r,"x","spaceToBatchND");Z(p.rank>=1+l.length,()=>`input rank ${p.rank} should be > than [blockShape] ${l.length}`),Z(u.length===l.length,()=>`paddings.shape[0] ${u.length} must be equal to [blockShape] ${l.length}`),Z(p.shape.reduce((S,x,v)=>v>0&&v<=l.length?S&&(x+u[v-1][0]+u[v-1][1])%l[v-1]===0:S,!0),()=>`input spatial dimensions ${p.shape.slice(1)} with paddings ${u.toString()} must be divisible by blockShapes ${l.toString()}`);const g=S=>S.spaceToBatchND(p,l,u),f={x:p},I={blockShape:l,paddings:u};return Y.runKernelFunc(g,f,null,af,I)}const gI=z({spaceToBatchND_:AJ});function vJ(r,l){let u=M(r,"base","pow"),p=M(l,"exp","pow");[u,p]=mt(u,p);const g={a:u,b:p},f=(I,S)=>{const x=I.pow(u,p);return S([u,p,x]),x};return Y.runKernelFunc(f,g,null,Hm)}const ea=z({pow_:vJ});function Qr(r,l){Sc(r);const u=ur(r,l);if(u.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");const p=null;return zi(r,p,u,l)}function Df(r,l,u=1,p="float32"){if(u===0)throw new Error("Cannot have a step of zero");const g=()=>{const I=r===l,S=r1;if(I||S||x)return Es([0],p);const v=Math.abs(Math.ceil((l-r)/u)),O=Ko(v,p);l(f([l]),l.dtype==="bool"?Le(l,"int32"):g.relu(l)),p={x:l};return Y.runKernelFunc(u,p,null,qm)}const bu=z({relu_:NJ});function CJ(r,l){const u=M(r,"x","reverse"),p=I=>{const S=ht(l,u.shape);if(u.rank===0)return pi(u);const x=I.reverse(u,S);return ie(x,u.shape)},g={x:u},f={dims:l};return Y.runKernelFunc(p,g,null,Jm,f)}const Oc=z({reverse_:CJ});function RJ(r){const l=M(r,"x","rsqrt"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.rsqrt(l);return g([l]),f},u,null,Zm)}const yI=z({rsqrt_:RJ});function OJ(r){const l=M(r,"x","sin"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.sin(l);return g([l]),f},u,null,tf)}const bI=z({sin_:OJ});function EJ(r){const l=M(r,"x","sinh"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.sinh(l);return g([l]),f},u,null,nf)}const wI=z({sinh_:EJ});function DJ(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`);const l={input:r};return Y.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],g=r.size/p,f=r.as2D(g,p),I=u.fft(f);return I.reshape(r.shape)},l,null,d2)}const wu=z({fft_:DJ});function kJ(r){Z(r.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`);const l={input:r};return Y.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],g=r.size/p,f=ie(r,[g,p]),I=u.ifft(f);return ie(I,r.shape)},l,null,y2)}const Ec=z({ifft_:kJ});function FJ(r){const l=r.shape[r.shape.length-1],u=r.size/l;let p;if(l<=2){const g=ie(r,[u,l]);p=Ec(g)}else{const g=[u,2*(l-1)],f=ie(Rc(r),[u,l]),I=ie(yu(r),[u,l]),S=Oc(Tt(f,[0,1],[u,l-2]),1),x=ae(Oc(Tt(I,[0,1],[u,l-2]),1),ke(-1)),v=yn([f,S],1),O=yn([I,x],1),C=ie(Pi(v,O),[g[0],g[1]]);p=Ec(C)}if(p=Rc(p),r.rank===3&&r.shape[0]!==0){const g=p,f=r.shape[0];p=ie(p,[f,p.shape[0]/f,p.shape[1]]),g.dispose()}return p}const LI=z({irfft_:FJ});function MR(r,l,u=0){let p=[];if(typeof l=="number")Z(r.shape[u]%l===0,()=>"Number of splits must evenly divide the axis."),p=new Array(l).fill(r.shape[u]/l);else{const 
g=l.reduce((I,S)=>(S===-1&&(I+=1),I),0);Z(g<=1,()=>"There should be only one negative value in split array.");const f=l.indexOf(-1);if(f!==-1){const I=l.reduce((S,x)=>x>0?S+x:S);l[f]=r.shape[u]-I}Z(r.shape[u]===l.reduce((I,S)=>I+S),()=>"The sum of sizes must match the size of the axis dimension."),p=l}return p}function _J(r,l,u=0){const p=M(r,"x","split"),g=(S,x)=>{const v=ht(u,p.shape)[0],O=MR(p,l,v);return S.split(p,O,v)},f={x:p},I={numOrSizeSplits:l,axis:u};return Y.runKernelFunc(g,f,null,cf,I)}const eo=z({split_:_J});function WJ(r,l){Z(r.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${r.dtype}`);let u=r.shape[r.shape.length-1];const p=r.size/u;let g;if(l!=null&&l0),te=r.shape.map(oe=>oe);te[r.shape.length-1]=l,g=Tt(r,ne,te),u=l}else if(l!=null&&l>u){const ne=r.shape.map(te=>te);ne[r.shape.length-1]=l-u,g=yn([r,Es(ne)],r.shape.length-1),u=l}else g=r;const f=qe(g),I=ie(Pi(g,f),[p,u]),S=wu(I),x=Math.floor(u/2)+1,v=Rc(S),O=yu(S),C=eo(v,[x,u-x],v.shape.length-1),U=eo(O,[x,u-x],O.shape.length-1),G=g.shape.slice();return G[g.shape.length-1]=x,ie(Pi(C[0],U[0]),G)}const Lu=z({rfft_:WJ});function $J(r){const l=M(r,"x","sqrt"),u={x:l};return Y.runKernelFunc((p,g)=>{const f=p.sqrt(l);return g([l]),f},u,null,rf)}const ps=z({sqrt_:$J});function UJ(r,l){let u=M(r,"a","squaredDifference"),p=M(l,"b","squaredDifference");[u,p]=mt(u,p),it(u.shape,p.shape);const g=(S,x)=>{const v=S.squaredDifference(u,p);return x([u,p]),v},f={a:u,b:p},I={};return Y.runKernelFunc(g,f,null,lf,I)}const SI=z({squaredDifference_:UJ});function BJ(r,l){const u=M(r,"x","squeeze");return ie(u,rR(u.shape,l).newShape)}const II=z({squeeze_:BJ});function MJ(r,l=0){const u=Tf(r,"tensors","stack");if(Z(u.length>=1,()=>"Pass at least one tensor to tf.stack"),u.length===1)return Os(u[0],l);const p=u[0].rank,g=u[0].shape,f=u[0].dtype;Z(l<=p,()=>"Axis must be <= rank of the tensor"),u.forEach(S=>{Zt(g,S.shape,"All tensors passed to stack must have matching shapes"),Z(f===S.dtype,()=>"All tensors passed to stack must have matching dtypes")});const I=u.map(S=>Os(S,l));return yn(I,l)}const Vs=z({stack_:MJ});function PJ(r,l=0){const u=M(r,"x","step"),p={x:u},g={alpha:l};return Y.runKernelFunc(f=>f.step(u,l),p,null,gf,g)}const ta=z({step_:PJ});function na(r,l,u){if(Sc(r),l!=null&&l.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const p=ur(r,u);if(p.length!==2&&p.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return zi(r,l,p,u)}function zJ(r,l,u){const p=M(r,"x","unsortedSegmentSum"),g=M(l,"segmentIds","unsortedSegmentSum","int32");Z(Qt(u),()=>"numSegments must be of dtype int");const f={x:p,segmentIds:g},I={numSegments:u},S=(x,v)=>{const O=x.unsortedSegmentSum(p,g,u);return v([g]),O};return Y.runKernelFunc(S,f,null,mf,I)}const xI=z({unsortedSegmentSum_:zJ});function GJ(r,l=0){const u=M(r,"x","unstack");Z(l>=-u.shape.length&&l`Axis = ${l} is not in [-${u.shape.length}, ${u.shape.length})`),l<0&&(l+=u.shape.length);const p={value:u},g={axis:l},f=I=>I.unstack(u,l);return Y.runKernelFunc(f,p,null,pf,g)}const Su=z({unstack_:GJ});function VJ(r,l="euclidean",u=null,p=!1){r=M(r,"x","norm");const g=PR(r,l,u);let f=g.shape;if(p){const I=ht(u,r.shape);f=Zn(g.shape,I)}return ie(g,f)}function PR(r,l,u=null){if(r.rank===0)return Un(r);if(r.rank!==1&&u===null)return PR(ie(r,[-1]),l,u);if(r.rank===1||typeof 
u=="number"||Array.isArray(u)&&u.length===1){if(l===1)return Fe(Un(r),u);if(l===Infinity)return Zo(Un(r),u);if(l===-Infinity)return Ef(Un(r),u);if(l==="euclidean"||l===2)return ps(Fe(ea(Un(r),ke(2,"int32")),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}if(Array.isArray(u)&&u.length===2){if(l===1)return Zo(Fe(Un(r),u[0]),u[1]-1);if(l===Infinity)return Zo(Fe(Un(r),u[1]),u[0]);if(l===-Infinity)return Ef(Fe(Un(r),u[1]),u[0]);if(l==="fro"||l==="euclidean")return ps(Fe(ut(r),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}throw new Error(`Error in norm: invalid axis: ${u}`)}const kf=z({norm_:VJ});function zR(r){return Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2))))}function Ff(r,l,u){const p=1-r%2,g=new Float32Array(r);for(let f=0;f`Error in conv2dDerFilter: input must be rank 4, but got shape ${S.shape}.`),Z(x.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${x.shape}.`),Z(u.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${u}.`);const v=f==="NHWC"?S.shape[3]:S.shape[1],O=f==="NHWC"?x.shape[3]:x.shape[1];Z(v===u[2],()=>`Error in conv2dDerFilter: depth of input ${v}) must match input depth in filter (${u[2]}.`),Z(O===u[3],()=>`Error in conv2dDerFilter: depth of dy (${O}) must match output depth for filter (${u[3]}).`),I!=null&&Z(Qt(g),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const C=ne=>{const te=1,oe=Cc(f),ge=dr(S.shape,u,p,te,g,I,!1,oe);return ne.conv2dDerFilter(S,x,ge)},U={x:S,dy:x},G={strides:p,pad:g,dataFormat:f,dimRoundingMode:I};return Y.runKernelFunc(C,U,null,XC,G)}const _f=z({conv2DBackpropFilter_:YJ});function HJ(r,l,u,p){let g=r;r.rank===3&&(g=ie(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let f=l;f.rank===3&&(f=ie(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const I=x=>x.depthwiseConv2DDerFilter(g,f,p),S={x:g,dy:f};return Y.runKernelFunc(I,S,null,n2)}const GR=z({depthwiseConv2dNativeBackpropFilter_:HJ});function qJ(r,l,u,p){let g=l,f=!1;l.rank===3&&(f=!0,g=ie(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const I=v=>v.depthwiseConv2DDerInput(g,u,p),S={dy:g},x=Y.runKernelFunc(I,S,null,s2);return f?ie(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const VR=z({depthwiseConv2dNativeBackpropInput_:qJ});function jJ(r){return Ff(r,.54,.46)}const YR=z({hammingWindow_:jJ});function KJ(r){return Ff(r,.5,.5)}const Wf=z({hannWindow_:KJ});function XJ(r,l,u,p=!1,g=0){let f=0;const I=[];for(;f+l<=r.size;)I.push(Tt(r,f,l)),f+=u;if(p)for(;f`Error in cropAndResize: image must be rank 4,but got rank ${I.rank}.`),Z(S.rank===2&&S.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${v},4] but had shape ${S.shape}.`),Z(x.rank===1&&x.shape[0]===v,()=>`Error in cropAndResize: boxInd must be have size [${v}] but had shape ${S.shape}.`),Z(p.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${p.length}.`),Z(p[0]>=1&&p[1]>=1,()=>`cropSize must be atleast [1,1], but was ${p}`),Z(g==="bilinear"||g==="nearest",()=>`method must be bilinear or nearest, but was ${g}`);const O=ne=>ne.cropAndResize(I,S,x,p,g,f),C={image:I,boxes:S,boxInd:x},U={method:g,extrapolationValue:f,cropSize:p},G=Y.runKernelFunc(O,C,null,e2,U);return G}const qR=z({cropAndResize_:ZJ});function QJ(r){const l=M(r,"image","flipLeftRight","float32");Z(l.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${l.rank}.`);const u={image:l},p=Y.runKernel(m2,u,{});return p}const jR=z({flipLeftRight_:QJ});function e9(r,l,u=0,p=.5){const 
g=M(r,"image","rotateWithOffset","float32");Z(g.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${g.rank}.`);const f={image:g},I={radians:l,fillValue:u,center:p},S=Y.runKernel(tR,f,I);return S}const KR=z({rotateWithOffset_:e9});function Ys(r,l,u,p,g,f){p==null&&(p=.5),g==null&&(g=Number.NEGATIVE_INFINITY),f==null&&(f=0);const I=r.shape[0];return u=Math.min(u,I),Z(0<=p&&p<=1,()=>`iouThreshold must be in [0, 1], but was '${p}'`),Z(r.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${r.rank}'`),Z(r.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`),Z(l.rank===1,()=>"scores must be a 1D tensor"),Z(l.shape[0]===I,()=>`scores has incompatible shape with boxes. Expected ${I}, but was ${l.shape[0]}`),Z(0<=f&&f<=1,()=>`softNmsSigma must be in [0, 1], but was '${f}'`),{maxOutputSize:u,iouThreshold:p,scoreThreshold:g,softNmsSigma:f}}function t9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY){const f=M(r,"boxes","nonMaxSuppression"),I=M(l,"scores","nonMaxSuppression"),S=Ys(f,I,u,p,g);u=S.maxOutputSize,p=S.iouThreshold,g=S.scoreThreshold;const x={maxOutputSize:u,iouThreshold:p,scoreThreshold:g};return Y.runKernelFunc(v=>v.nonMaxSuppression(f,I,u,p,g),{boxes:f,scores:I},null,_2,x)}const XR=z({nonMaxSuppression_:t9});function JR(r,l,u){const p=n9(r,l,u),g=p<0?-(p+1):p;r.splice(g,0,l)}function n9(r,l,u){return i9(r,l,u||s9)}function s9(r,l){return r>l?1:r>>1);const S=u(l,r[f]);S>0?p=f+1:(g=f,I=!S)}return I?p:-p-1}function ZR(r,l,u,p,g){return TI(r,l,u,p,g,0).selectedIndices}function QR(r,l,u,p,g,f){return TI(r,l,u,p,g,0,!1,f,!0)}function eO(r,l,u,p,g,f){return TI(r,l,u,p,g,f,!0)}function TI(r,l,u,p,g,f,I=!1,S=!1,x=!1){const v=[];for(let oe=0;oeg&&v.push({score:l[oe],boxIndex:oe,suppressBeginIndex:0});v.sort(tO);const O=f>0?-.5/f:0,C=[],U=[];for(;C.length0;){const oe=v.pop(),{score:ge,boxIndex:fe,suppressBeginIndex:Ae}=oe;if(ge=Ae;--Ve){const rt=r9(r,fe,C[Ve]);if(rt>=p){Te=!0;break}if(oe.score=oe.score*o9(p,O,rt),oe.score<=g)break}oe.suppressBeginIndex=C.length,Te||(oe.score===ge?(C.push(fe),U.push(oe.score)):oe.score>g&&JR(v,oe,tO))}const G=C.length,ne=u-G;S&&ne>0&&(C.push(...new Array(ne).fill(0)),U.push(...new Array(ne).fill(0)));const te={selectedIndices:Qr(C,"int32")};return I&&(te.selectedScores=Qr(U,"float32")),x&&(te.validOutputs=ke(G,"int32")),te}function r9(r,l,u){const p=r.subarray(l*4,l*4+4),g=r.subarray(u*4,u*4+4),f=Math.min(p[0],p[2]),I=Math.min(p[1],p[3]),S=Math.max(p[0],p[2]),x=Math.max(p[1],p[3]),v=Math.min(g[0],g[2]),O=Math.min(g[1],g[3]),C=Math.max(g[0],g[2]),U=Math.max(g[1],g[3]),G=(S-f)*(x-I),ne=(C-v)*(U-O);if(G<=0||ne<=0)return 0;const te=Math.max(f,v),oe=Math.max(I,O),ge=Math.min(S,C),fe=Math.min(x,U),Ae=Math.max(ge-te,0)*Math.max(fe-oe,0);return Ae/(G+ne-Ae)}function o9(r,l,u){const p=Math.exp(l*u*u);return u<=r?p:0}function tO(r,l){return r.score-l.score||r.score===l.score&&l.boxIndex-r.boxIndex}async function a9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY){const f=M(r,"boxes","nonMaxSuppressionAsync"),I=M(l,"scores","nonMaxSuppressionAsync"),S=Ys(f,I,u,p,g);u=S.maxOutputSize,p=S.iouThreshold,g=S.scoreThreshold;const x=await Promise.all([f.data(),I.data()]),v=x[0],O=x[1],C=ZR(v,O,u,p,g);return f!==r&&f.dispose(),I!==l&&I.dispose(),C}const nO=a9;function c9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY,f=0){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),x=Ys(I,S,u,p,g,f);u=x.maxOutputSize,p=x.iouThreshold,g=x.scoreThreshold,f=x.softNmsSigma;const 
v={boxes:I,scores:S},O={maxOutputSize:u,iouThreshold:p,scoreThreshold:g,softNmsSigma:f},C=Y.runKernel($2,v,O);return{selectedIndices:C[0],selectedScores:C[1]}}const sO=z({nonMaxSuppressionWithScore_:c9});async function l9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY,f=0){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),x=Ys(I,S,u,p,g,f);u=x.maxOutputSize,p=x.iouThreshold,g=x.scoreThreshold,f=x.softNmsSigma;const v=await Promise.all([I.data(),S.data()]),O=v[0],C=v[1],U=eO(O,C,u,p,g,f);return I!==r&&I.dispose(),S!==l&&S.dispose(),U}const iO=l9;function h9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY,f=!1){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),x=Ys(I,S,u,p,g,null),v=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,U={boxes:I,scores:S},G={maxOutputSize:v,iouThreshold:O,scoreThreshold:C,padToMaxOutputSize:f},ne=Y.runKernel(W2,U,G);return{selectedIndices:ne[0],validOutputs:ne[1]}}const rO=z({nonMaxSuppressionPadded_:h9});async function u9(r,l,u,p=.5,g=Number.NEGATIVE_INFINITY,f=!1){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),x=Ys(I,S,u,p,g,null),v=x.maxOutputSize,O=x.iouThreshold,C=x.scoreThreshold,[U,G]=await Promise.all([I.data(),S.data()]),ne=QR(U,G,v,O,C,f);return I!==r&&I.dispose(),S!==l&&S.dispose(),ne}const oO=u9;function d9(r,l,u=!1){const p=M(r,"images","resizeBilinear");Z(p.rank===3||p.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${p.rank}.`),Z(l.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${l}.`);let g=p,f=!1;p.rank===3&&(f=!0,g=ie(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,x=(U,G)=>(G([g]),U.resizeBilinear(g,I,S,u)),v={images:g},O={alignCorners:u,size:l},C=Y.runKernelFunc(x,v,null,Xm,O);return f?ie(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const aO=z({resizeBilinear_:d9});function p9(r,l,u=!1){const p=M(r,"images","resizeNearestNeighbor");Z(p.rank===3||p.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${p.rank}.`),Z(l.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${l}.`),Z(p.dtype==="float32"||p.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let g=p,f=!1;p.rank===3&&(f=!0,g=ie(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,x={images:g},v={alignCorners:u,size:l},O=(U,G)=>(G([g]),U.resizeNearestNeighbor(g,I,S,u)),C=Y.runKernelFunc(O,x,null,Km,v);return f?ie(C,[C.shape[1],C.shape[2],C.shape[3]]):C}const cO=z({resizeNearestNeighbor_:p9});function m9(r,l,u){Z(l%1===0,()=>`bandPart(): numLower must be an integer, got ${l}.`),Z(u%1===0,()=>`bandPart(): numUpper must be an integer, got ${u}.`);const p=M(r,"a","bandPart");Z(p.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${p.rank}.`);const g=p.shape,[f,I]=p.shape.slice(-2);if(!(l<=f))throw new Error(`bandPart(): numLower (${l}) must not be greater than the number of rows (${f}).`);if(!(u<=I))throw new Error(`bandPart(): numUpper (${u}) must not be greater than the number of columns (${I}).`);l<0&&(l=f),u<0&&(u=I);const S=ie(Df(0,f,1,"int32"),[-1,1]),x=Df(0,I,1,"int32"),v=Be(S,x),O=Qo(mr(v,ke(+l,"int32")),pr(v,ke(-u,"int32"))),C=Es([f,I],p.dtype);return ie(Vs(Su(ie(p,[-1,f,I])).map(U=>Bn(O,U,C))),g)}const lO=z({bandPart_:m9});function f9(r){let l;if(Array.isArray(r)){l=!1,Z(r!=null&&r.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const g=r[0].shape[0];for(let f=1;f`Gram-Schmidt: Non-unique lengths found in the input vectors: 
(${r[f].shape[0]} vs. ${g})`)}else l=!0,r=eo(r,r.shape[0],0).map(g=>II(g,[0]));Z(r.length<=r[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);const u=[],p=r;for(let g=0;g{let f=p[g];if(g>0)for(let I=0;I=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`),r.rank===2)return uO(r,l);{const u=r.shape.slice(0,r.shape.length-2).reduce((x,v)=>x*v),p=Su(ie(r,[u,r.shape[r.shape.length-2],r.shape[r.shape.length-1]]),0),g=[],f=[];p.forEach(x=>{const[v,O]=uO(x,l);g.push(v),f.push(O)});const I=ie(Vs(g,0),r.shape),S=ie(Vs(f,0),r.shape);return[I,S]}}function uO(r,l=!1){return Y.tidy(()=>{Z(r.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);const u=r.shape[0],p=r.shape[1];let g=sI(u),f=pi(r);const I=na([[1]],[1,1]);let S=pi(I);const x=u>=p?p:u;for(let v=0;v{const G=Tt(f,[v,v],[u-v,1]),ne=kf(G),te=Tt(f,[v,v],[1,1]),oe=Bn(mi(te,0),na([[-1]]),na([[1]])),ge=Be(te,ae(oe,ne)),fe=Pe(G,ge);fe.shape[0]===1?S=pi(I):S=yn([I,Tt(fe,[1,0],[fe.shape[0]-1,fe.shape[1]])],0);const Ae=yt(Pe(dn(oe,ge),ne)),Te=Tt(f,[v,0],[u-v,p]),Ve=ae(Ae,S),rt=Wt(S);if(v===0)f=Be(Te,dn(Ve,dn(rt,Te)));else{const Kt=Be(Te,dn(Ve,dn(rt,Te)));f=yn([Tt(f,[0,0],[v,p]),Kt],0)}const vt=Wt(Ve),$t=Tt(g,[0,v],[u,g.shape[1]-v]);if(v===0)g=Be($t,dn(dn($t,S),vt));else{const Kt=Be($t,dn(dn($t,S),vt));g=yn([Tt(g,[0,0],[u,v]),Kt],1)}return[S,f,g]}),DR([O,C,U])}return!l&&u>p&&(g=Tt(g,[0,0],[u,p]),f=Tt(f,[0,0],[p,p])),[g,f]})}const dO=z({qr_:g9});var jt;(function(r){r[r.NONE=0]="NONE",r[r.MEAN=1]="MEAN",r[r.SUM=2]="SUM",r[r.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(jt||(jt={}));function y9(r,l,u=jt.SUM_BY_NONZERO_WEIGHTS){const p=M(r,"losses","computeWeightedLoss");let g=null;l!=null&&(g=M(l,"weights","computeWeightedLoss"));const f=g==null?p:ae(p,g);if(u===jt.NONE)return f;if(u===jt.SUM)return Fe(f);if(u===jt.MEAN){if(g==null)return dI(f);{const I=p.size/g.size,S=Pe(Fe(f),Fe(g));return I>1?Pe(S,ke(I)):S}}if(u===jt.SUM_BY_NONZERO_WEIGHTS){if(g==null)return Pe(Fe(f),ke(p.size));{const I=ae(g,Gi(p.shape)),S=Le(Fe(mI(I,ke(0))),"float32");return Pe(Fe(f),S)}}throw Error(`Unknown reduction: ${u}`)}const xn=z({computeWeightedLoss_:y9});function b9(r,l,u,p=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","absoluteDifference"),f=M(l,"predictions","absoluteDifference");let I=null;u!=null&&(I=M(u,"weights","absoluteDifference")),Zt(g.shape,f.shape,"Error in absoluteDifference: ");const S=Un(Be(g,f));return xn(S,I,p)}const pO=z({absoluteDifference_:b9});function w9(r,l,u,p,g=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","cosineDistance"),I=M(l,"predictions","cosineDistance");let S=null;p!=null&&(S=M(p,"weights","cosineDistance")),Zt(f.shape,I.shape,"Error in cosineDistance: ");const x=ke(1),v=Be(x,Fe(ae(f,I),u,!0));return xn(v,S,g)}const mO=z({cosineDistance_:w9});function L9(r,l,u,p=jt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"labels","hingeLoss");const f=M(l,"predictions","hingeLoss");let I=null;u!=null&&(I=M(u,"weights","hingeLoss")),Zt(g.shape,f.shape,"Error in hingeLoss: ");const S=ke(1);g=Be(ae(ke(2),g),S);const x=bu(Be(S,ae(g,f)));return xn(x,I,p)}const fO=z({hingeLoss_:L9});function S9(r,l,u,p=1,g=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","huberLoss"),I=M(l,"predictions","huberLoss");let S=null;u!=null&&(S=M(u,"weights","huberLoss")),Zt(f.shape,I.shape,"Error in huberLoss: ");const x=ke(p),v=Un(Be(I,f)),O=pI(v,x),C=Be(v,O),U=Lt(ae(ke(.5),ut(O)),ae(x,C));return xn(U,S,g)}const gO=z({huberLoss_:S9});function 
I9(r,l,u,p=1e-7,g=jt.SUM_BY_NONZERO_WEIGHTS){const f=M(r,"labels","logLoss"),I=M(l,"predictions","logLoss");let S=null;u!=null&&(S=M(u,"weights","logLoss")),Zt(f.shape,I.shape,"Error in logLoss: ");const x=ke(1),v=ke(p),O=yt(ae(f,Zr(Lt(I,v)))),C=ae(Be(x,f),Zr(Lt(Be(x,I),v))),U=Be(O,C);return xn(U,S,g)}const yO=z({logLoss_:I9});function x9(r,l,u,p=jt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","meanSquaredError"),f=M(l,"predictions","meanSquaredError");let I=null;u!=null&&(I=M(u,"weights","meanSquaredError")),Zt(g.shape,f.shape,"Error in meanSquaredError: ");const S=SI(g,f);return xn(S,I,p)}const bO=z({meanSquaredError_:x9});function T9(r,l){const u=M(r,"labels","sigmoidCrossEntropyWithLogits"),p=M(l,"logits","sigmoidCrossEntropyWithLogits");Zt(u.shape,p.shape,"Error in sigmoidCrossEntropyWithLogits: ");const g=bu(p),f=ae(p,u),I=lI(Mn(yt(Un(p))));return Lt(Be(g,f),I)}function A9(r,l,u,p=0,g=jt.SUM_BY_NONZERO_WEIGHTS){let f=M(r,"multiClassLabels","sigmoidCrossEntropy");const I=M(l,"logits","sigmoidCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","sigmoidCrossEntropy")),Zt(f.shape,I.shape,"Error in sigmoidCrossEntropy: "),p>0){const v=ke(p),O=ke(1),C=ke(.5);f=Lt(ae(f,Be(O,v)),ae(C,v))}const x=T9(f,I);return xn(x,S,g)}const wO=z({sigmoidCrossEntropy_:A9});function v9(r,l,u=-1){if(u===-1&&(u=l.rank-1),u!==l.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${l.rank} and dim was ${u}`);const p=Of((g,f,I)=>{const S=!0,x=hI(f,[u],S),v=Be(Le(f,"float32"),x);I([g,v]);const O=yt(ae(v,g)),C=Fe(O,[u]),U=(G,ne)=>{const[te,oe]=ne,ge=Zn(G.shape,[u]);return[ae(ie(G,ge),Be(Le(te,"float32"),Mn(oe))),ae(ie(G,ge),Be(Mn(oe),Le(te,"float32")))]};return{value:C,gradFunc:U}});return p(r,l)}function N9(r,l,u,p=0,g=jt.SUM_BY_NONZERO_WEIGHTS){let f=M(r,"onehotLabels","softmaxCrossEntropy");const I=M(l,"logits","softmaxCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","softmaxCrossEntropy")),Zt(f.shape,I.shape,"Error in softmaxCrossEntropy: "),p>0){const v=ke(p),O=ke(1),C=ke(f.shape[1]);f=Lt(ae(f,Be(O,v)),Pe(v,C))}const x=v9(f,I);return xn(x,S,g)}const LO=z({softmaxCrossEntropy_:N9});const ITe={fft:wu,ifft:Ec,rfft:Lu,irfft:LI},NTe={hammingWindow:YR,hannWindow:Wf,frame:$f,stft:HR},SO={flipLeftRight:jR,resizeNearestNeighbor:cO,resizeBilinear:aO,rotateWithOffset:KR,cropAndResize:qR,nonMaxSuppression:XR,nonMaxSuppressionAsync:nO,nonMaxSuppressionWithScore:sO,nonMaxSuppressionWithScoreAsync:iO,nonMaxSuppressionPadded:rO,nonMaxSuppressionPaddedAsync:oO},zTe={bandPart:lO,gramSchmidt:hO,qr:dO},ZTe={absoluteDifference:pO,computeWeightedLoss:xn,cosineDistance:mO,hingeLoss:fO,huberLoss:gO,logLoss:yO,meanSquaredError:bO,sigmoidCrossEntropy:wO,softmaxCrossEntropy:LO};const IO=1.7580993408473768,xO=1.0507009873554805;const TO={kernelName:Lm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,ta(Le(u,"float32"),-1))}}};const AO={kernelName:kC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=ut(Le(u,"float32")),g=ps(Be(ke(1),p));return yt(Pe(r,g))}}}};const vO={kernelName:FC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=ps(Be(ut(Le(u,"float32")),1));return Pe(r,p)}}}};const NO={kernelName:wc,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{let S=r;const x=At(u.shape,g);return x.length>0&&(S=Fe(S,x)),ie(S,u.shape)},I=()=>{let S=r;const x=At(p.shape,g);return x.length>0&&(S=Fe(S,x)),ie(S,p.shape)};return{a:f,b:I}}};const 
CO={kernelName:_C,saveAllInputs:!0,gradFunc:(r,l)=>{const u={};return l.forEach((p,g)=>{u[g]=()=>r.clone()}),u}};const RO={kernelName:WC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>qe(u)}}};const OO={kernelName:$C,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>qe(u)}}};const EO={kernelName:UC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,ps(Be(ke(1),ut(Le(u,"float32")))))}}};const DO={kernelName:BC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=ps(Lt(ke(1),ut(Le(u,"float32"))));return Pe(r,p)}}}};const kO={kernelName:zC,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{const S=Lt(ut(u),ut(p));let x=ae(r,Pe(p,S));const v=At(u.shape,g);return v.length>0&&(x=Fe(x,v)),ie(x,u.shape)},I=()=>{const S=Lt(ut(u),ut(p));let x=yt(ae(r,Pe(u,S)));const v=At(p.shape,g);return v.length>0&&(x=Fe(x,v)),ie(x,p.shape)};return{a:f,b:I}}};const FO={kernelName:MC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,Lt(ut(Le(u,"float32")),1))}}};const _O={kernelName:PC,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,Be(ke(1),ut(Le(u,"float32"))))}}};function C9(r,l,u,p,g=[1,1,1],f,I){const S=M(r,"dy","avgPool3dBackprop"),x=M(l,"input","avgPool3dBackprop");let v=S,O=x,C=!1;x.rank===4&&(C=!0,v=ie(S,[1,S.shape[0],S.shape[1],S.shape[2],S.shape[3]]),O=ie(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]])),Z(v.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${v.rank}.`),Z(O.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${O.rank}.`),Z(Jr(p,g),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${p} and dilations '${g}'`),I!=null&&Z(Qt(f),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${f}.`);const U=oe=>{const ge=Cf(O.shape,u,p,g,f,I);return oe.avgPool3dBackprop(v,O,ge)},G={dy:v,input:O},ne={filterSize:u,strides:p,dilations:g,pad:f,dimRoundingMode:I},te=Y.runKernelFunc(U,G,null,HC,ne);return C?ie(te,[te.shape[1],te.shape[2],te.shape[3],te.shape[4]]):te}const WO=z({avgPool3dBackprop_:C9});const $O={kernelName:YC,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:g,strides:f,dilations:I,pad:S,dimRoundingMode:x}=u,v=I==null?[1,1,1]:I;return{x:()=>WO(r,p,g,f,v,S,x)}}};function R9(r,l,u,p,g){const f=M(r,"dy","avgPoolBackprop"),I=M(l,"input","avgPoolBackprop");Z(I.rank===f.rank,()=>`Rank of input (${I.rank}) does not match rank of dy (${f.rank})`);let S=I,x=f,v=!1;I.rank===3&&(v=!0,S=ie(I,[1,I.shape[0],I.shape[1],I.shape[2]]),x=ie(f,[1,f.shape[0],f.shape[1],f.shape[2]])),Z(x.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${x.rank}.`),Z(S.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${S.rank}.`);const O=ne=>{const te=Nf(S.shape,u,p,1,g);return ne.avgPoolBackprop(x,S,te)},C={dy:x,input:S},U={filterSize:u,strides:p,pad:g},G=Y.runKernelFunc(O,C,null,VC,U);return v?ie(G,[G.shape[1],G.shape[2],G.shape[3]]):G}const UO=z({avgPoolBackprop_:R9});const BO={kernelName:GC,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:g,strides:f,pad:I}=u;return{x:()=>UO(r,p,g,f,I)}}};const MO={kernelName:Sm,inputsToSave:["a","b"],gradFunc:(r,l,u)=>{const[p,g]=l,{transposeA:f,transposeB:I}=u;return!f&&!I?{a:()=>dn(r,g,!1,!0),b:()=>dn(p,r,!0,!1)}:!f&&I?{a:()=>dn(r,g,!1,!1),b:()=>dn(r,p,!0,!1)}:f&&!I?{a:()=>dn(g,r,!1,!0),b:()=>dn(p,r,!1,!1)}:{a:()=>dn(g,r,!0,!0),b:()=>dn(r,p,!0,!0)}}};const 
PO={kernelName:Im,gradFunc:(r,l,u)=>{const{blockShape:p,crops:g}=u;return{x:()=>gI(r,p,g)}}};const zO={kernelName:xm,gradFunc:(r,l,u)=>{const p=u,g=p.inputShape,f=p.shape,I=Array.from(f);for(let x=g.length-1;x>=0;x--)if(g[x]===f[x])I[x]=1;else if(g[x]!==1)throw new Error(`broadcastTo(): [${g}] cannot be broadcast to [${f}].`);const S=[];for(let x=0;x1&&S.push(x);return{x:()=>Fe(r,S,!0)}}};const GO={kernelName:Lc,gradFunc:r=>({x:()=>r.clone()})};const VO={kernelName:qC,gradFunc:r=>({x:()=>qe(r)})};const YO={kernelName:jC,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{clipValueMin:g,clipValueMax:f}=u;return{x:()=>Bn(Qo(pr(p,g),mr(p,f)),r,qe(r))}}};const HO={kernelName:Tm,saveAllInputs:!0,gradFunc:(r,l,u)=>{const p=l.map(x=>x.shape),{axis:g}=u,f=ht(g,l[0].shape)[0],I=p.map(x=>x[f]),S=eo(r,I,f);return S.map(x=>()=>x)}};const qO={kernelName:Am,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,g]=l,{dilations:f,strides:I,pad:S,dataFormat:x}=u;return Z(Xr(f),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${f}'`),{x:()=>$R(p.shape,r,g,I,S,x),filter:()=>_f(p,r,g.shape,I,S,x)}}};const jO={kernelName:vm,inputsToSave:["dy","filter"],gradFunc:(r,l,u)=>{const[p,g]=l,{strides:f,pad:I,dataFormat:S,dimRoundingMode:x}=u;return{dy:()=>QS(r,g,f,I,S,1,x),filter:()=>_f(r,p,g.shape,f,I,S,x)}}};function O9(r,l,u,p,g){let f=r;r.rank===4&&(f=ie(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]]));let I=l;I.rank===4&&(I=ie(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),Z(f.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${f.shape}.`),Z(I.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${I.shape}.`),Z(u.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${u}.`),Z(f.shape[4]===u[3],()=>`Error in conv3dDerFilter: depth of input ${f.shape[4]}) must match input depth in filter (${u[3]}.`),Z(I.shape[4]===u[4],()=>`Error in conv3dDerFilter: depth of dy (${I.shape[4]}) must match output depth for filter (${u[4]}).`);const S=O=>{const C=1,U=fu(f.shape,u,p,C,g);return O.conv3dDerFilter(f,I,U)},x={x:f,y:I},v={strides:p,pad:g};return Y.runKernelFunc(S,x,null,ZC,v)}const KO=z({conv3DBackpropFilter_:O9});const XO={kernelName:JC,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:g,pad:f}=u;Z(Xr(p),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${p}'`);const[I,S]=l;return{x:()=>UR(I.shape,r,S,g,f),filter:()=>KO(I,r,S.shape,g,f)}}};const JO={kernelName:Nm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(yt(bI(Le(u,"float32"))),r)}}};const ZO={kernelName:Cm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(wI(Le(u,"float32")),r)}}};const QO={kernelName:Rm,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{axis:g,exclusive:f,reverse:I}=u;return{x:()=>{const S=ds([g],p.rank);let x=tI(r,g,f,!I);return S!=null&&(x=Wt(x,S)),x}}}};const e1={kernelName:t2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:g,pad:f,dimRoundingMode:I}=u,S=p==null?[1,1]:p;Z(Xr(S),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. 
Got dilations '${S}'`);const[x,v]=l;Z(x.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${x.rank}.`),Z(v.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${v.rank}.`),Z(x.shape[3]===v.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${x.shape[3]}) must match the inChannels dimension in filter ${v.shape[2]}.`),Z(Jr(g,S),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. Got strides ${g} and dilations '${S}'.`),I!=null&&Z(Qt(f),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${f}.`);const O=dr(x.shape,v.shape,g,S,f,I,!0);return{x:()=>VR(x.shape,r,v,O),filter:()=>GR(x,r,v.shape,O)}}};const t1={kernelName:i2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,g]=l,f={x:p,filter:g,dy:r},I={x:p,filter:g,dy:r};return{x:()=>Y.runKernel(r2,f,u),filter:()=>Y.runKernel(o2,I,u)}}};const n1={kernelName:Om,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{const S=Pe(r,Le(p,"float32")),x=At(u.shape,g);return x.length>0?ie(Fe(S,x),u.shape):S},I=()=>{let S=ae(r,Le(u,"float32"));const x=At(p.shape,g);x.length>0&&(S=ie(Fe(S,x),p.shape));const v=ut(p);return yt(Pe(S,Le(v,"float32")))};return{a:f,b:I}}};const s1={kernelName:a2,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l,p=f=>f.eluDer(r,u),g={dy:r,y:u};return{x:()=>Y.runKernelFunc(p,g,null,c2)}}};const i1={kernelName:l2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=ae(Mn(yt(ut(u))),2/Math.sqrt(Math.PI));return{x:()=>ae(r,p)}}};const r1={kernelName:Em,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,u)}}};const o1={kernelName:u2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,Mn(u))}}};const a1={kernelName:Dm,gradFunc:r=>({x:()=>qe(r)})};const c1={kernelName:km,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{const S=Pe(r,Le(p,"float32")),x=At(u.shape,g);return x.length>0?ie(Fe(S,x),u.shape):S},I=()=>{let S=ae(r,Le(u,"float32"));const x=At(p.shape,g);x.length>0&&(S=ie(Fe(S,x),p.shape));const v=ut(p);return yt(Pe(S,Le(v,"float32")))};return{a:f,b:I}}};const l1={kernelName:f2,inputsToSave:["x","mean","variance","scale"],gradFunc:(r,l,u)=>{const{varianceEpsilon:p}=u,[g,f,I,S]=l,x=S==null?ke(1):S,v=At(f.shape,g.shape),O=[];if(f.rank===1){for(let Te=0;Tef.rank===1?ie(ae(ae(r,Jo(ie(G,[1,1,1,f.shape[0]]),O)),x),g.shape):ie(ae(ae(r,G),x),g.shape),oe=()=>{let Te=ae(ae(G,ke(-1)),U);return f.rank===1&&(Te=Fe(Te,v)),ie(Te,f.shape)},ge=()=>{let Te=ae(ae(ne,C),U);return f.rank===1&&(Te=Fe(Te,v)),ie(Te,f.shape)},fe=()=>{const Te=ae(C,G);let Ve=ae(r,Te);return f.rank===1&&(Ve=Fe(Ve,v)),ie(Ve,f.shape)},Ae=()=>{let Te=r;return f.rank===1&&(Te=Fe(Te,v)),ie(Te,f.shape)};return{x:te,mean:oe,variance:ge,scale:fe,offset:Ae}}};const d1={kernelName:Fm,inputsToSave:["x","indices"],gradFunc:(r,l,u)=>{const[p,g]=l,{axis:f}=u,I=ht(f,p.shape)[0],S=()=>{const x=p.shape,v=g.size,O=x.slice(0,I),C=O.length,U=x.slice(f,x.length).slice(1),G=U.length,ne=h1(0,C),te=h1(C+1,C+1+G),oe=u1([O,[v],U]),ge=ie(r,oe),fe=ie(g,[v]),Ae=u1([[C],ne,te]),Te=Wt(ge,Ae);let Ve=xI(Te,fe,p.shape[I]);const rt=vc(Ae);return Ve=Wt(Ve,rt),Ve};return{x:S,indices:()=>g}}};function h1(r,l){const u=[];for(let p=r;p{const[u,p]=l;return{a:()=>qe(u),b:()=>qe(p)}}};const m1={kernelName:Wm,gradFunc:r=>({x:()=>Le(r,"float32")})};const f1={kernelName:w2,gradFunc:r=>({x:()=>qe(r)})};const 
g1={kernelName:L2,gradFunc:r=>({x:()=>qe(r)})};const y1={kernelName:S2,gradFunc:r=>({x:()=>qe(r)})};const b1={kernelName:Um,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,Lt(u,1))}}};const w1={kernelName:$m,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,Le(u,"float32"))}}};const L1={kernelName:v2,inputsToSave:[],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{axis:g}=u;return{logits:()=>{const f=!0,I=Mn(p);return Be(r,ae(Fe(r,g,f),I))}}}};function E9(r,l,u,p=5,g=1,f=1,I=.5){const S=O=>O.LRNGrad(u,r,l,p,g,f,I),x={x:r,y:l,dy:u},v={depthRadius:p,bias:g,alpha:f,beta:I};return Y.runKernelFunc(S,x,null,C2,v)}const S1=z({localResponseNormalizationBackprop_:E9});const I1={kernelName:N2,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,g]=l,{depthRadius:f,bias:I,alpha:S,beta:x}=u;return{x:()=>S1(p,g,r,f,I,S,x)}}};function Uf(r,l,u,p,g){return l.rank{const f=ae(r,Le(nI(u,l),r.dtype));return g==null?f:Wt(f,g)}}}const AI={kernelName:Bm,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{reductionIndices:g}=p,[f,I]=l,S=ht(g,f.shape),x=ds(S,f.rank),v=Uf(r,I,f,S,x);return{x:()=>{let O=v.x();return x!=null&&(O=Wt(O)),O}}}};const x1={kernelName:Mm,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=()=>ae(r,Le(pr(u,p),"float32")),f=()=>ae(r,Le(cI(u,p),"float32"));return{a:g,b:f}}};function D9(r,l,u,p,g,f=[1,1,1],I,S){const x=M(r,"dy","maxPool3dBackprop"),v=M(l,"input","maxPool3dBackprop"),O=M(u,"output","maxPool3dBackprop");let C=x,U=v,G=O,ne=!1;v.rank===4&&(ne=!0,C=ie(x,[1,x.shape[0],x.shape[1],x.shape[2],x.shape[3]]),U=ie(v,[1,v.shape[0],v.shape[1],v.shape[2],v.shape[3]]),G=ie(O,[1,O.shape[0],O.shape[1],O.shape[2],O.shape[3]])),Z(C.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),Z(U.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${U.rank}.`),Z(G.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${G.rank}.`),Z(Jr(g,f),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. 
Got strides ${g} and dilations '${f}'`),S!=null&&Z(Qt(I),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${I}.`);const te=Ae=>{const Te=Cf(U.shape,p,g,f,I,S);return Ae.maxPool3dBackprop(C,U,G,Te)},oe={dy:C,input:U,output:G},ge={filterSize:p,strides:g,dilations:f,pad:I,dimRoundingMode:S},fe=Y.runKernelFunc(te,oe,null,D2,ge);return ne?ie(fe,[fe.shape[1],fe.shape[2],fe.shape[3],fe.shape[4]]):fe}const T1=z({maxPool3dBackprop_:D9});const A1={kernelName:E2,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,g]=l,{filterSize:f,strides:I,dilations:S,pad:x,dimRoundingMode:v}=u,O=S==null?[1,1,1]:S;return{x:()=>T1(r,p,g,f,I,O,x,v)}}};function k9(r,l,u,p,g,f,I){const S=M(r,"dy","maxPoolBackprop"),x=M(l,"input","maxPoolBackprop"),v=M(u,"output","maxPoolBackprop");Z(x.rank===S.rank,()=>`Rank of input (${x.rank}) does not match rank of dy (${S.rank})`),Z(S.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${S.rank}.`),Z(x.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${x.rank}.`),I!=null&&Z(Qt(f),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${f}.`);const O=G=>{const ne=Nf(x.shape,p,g,1,f,I);return G.maxPoolBackprop(S,x,v,ne)},C={dy:S,input:x,output:v},U={filterSize:p,strides:g,pad:f,dimRoundingMode:I};return Y.runKernelFunc(O,C,null,O2,U)}const v1=z({maxPoolBackprop_:k9});const N1={kernelName:R2,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,g]=l,{filterSize:f,strides:I,pad:S}=u;return{x:()=>v1(r,p,g,f,I,S)}}};const C1={kernelName:Pm,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{axis:g}=p,[f,I]=l,S=ht(g,f.shape),x=ds(S,f.rank),v=Uf(r,I,f,S,x);return{x:()=>{let O=v.x();return x!=null&&(O=Wt(O)),O}}}};const R1={kernelName:zm,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=()=>ae(r,Le(mr(u,p),"float32")),f=()=>ae(r,Le(mi(u,p),"float32"));return{a:g,b:f}}};const O1={kernelName:k2,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{const S=At(u.shape,g);return S.length>0?ie(Fe(r,S),u.shape):r},I=()=>{const S=ae(r,yt(rI(Pe(u,p)))),x=At(p.shape,g);return x.length>0?ie(Fe(S,x),p.shape):S};return{a:f,b:I}}};const E1={kernelName:Gm,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{const S=ae(r,Le(p,"float32")),x=At(u.shape,g);return x.length>0?ie(Fe(S,x),u.shape):S},I=()=>{const S=ae(r,Le(u,"float32")),x=At(p.shape,g);return x.length>0?ie(Fe(S,x),p.shape):S};return{a:f,b:I}}};const D1={kernelName:Vm,gradFunc:r=>({x:()=>yt(r)})};const k1={kernelName:B2,inputsToSave:["indices"],gradFunc:(r,l)=>{const u=l[0];return{indices:()=>Es(u.shape,"float32")}}};const F1={kernelName:U2,gradFunc:r=>({x:()=>qe(r)})};const vI={kernelName:Ym,inputsToSave:["x"],gradFunc:(r,l,u)=>{const p=l[0],{paddings:g}=u,f=g.map(I=>I[0]);return{x:()=>Tt(r,f,p.shape)}}};const _1={kernelName:Hm,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(r,l)=>{const[u,p,g]=l,f=u,I=p,S=it(f.shape,I.shape),x=()=>{const O=Le(I,"float32");let C=ae(r,ae(O,ea(f,Be(O,ke(1)))));const U=At(f.shape,S);return U.length>0&&(C=Fe(C,U)),ie(C,f.shape)},v=()=>{const O=mi(f,0),C=Bn(O,Zr(f),qe(f));let U=ae(r,ae(g,C));const G=At(I.shape,S);return G.length>0&&(U=Fe(U,G)),ie(U,I.shape)};return{a:x,b:v}}};const W1={kernelName:M2,inputsToSave:["x","alpha"],gradFunc:(r,l)=>{const[u,p]=l,g=mi(u,0);return{x:()=>Bn(g,r,ae(r,p)),alpha:()=>{let f=Bn(g,qe(r),ae(r,u));const I=At(p.shape,r.shape);return 
I.length>0&&(f=Fe(f,I)),ie(f,p.shape)}}}};const $1={kernelName:G2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,yt(ut(u)))}}};const U1={kernelName:H2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=ae(mr(u,6),ta(u));return{x:()=>ae(r,Le(p,"float32"))}}};const B1={kernelName:qm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,Le(ta(u),"float32"))}}};const M1={kernelName:jm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ie(r,u.shape)}}};const P1={kernelName:Xm,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,g=S=>{const{alignCorners:x}=u;return S.resizeBilinearBackprop(r,p,x)},f={images:p},I=()=>Y.runKernelFunc(g,f,null,Y2,u);return{images:I}}};const z1={kernelName:Km,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,g=S=>{const{alignCorners:x}=u;return S.resizeNearestNeighborBackprop(r,p,x)},f={images:p},I=()=>Y.runKernelFunc(g,f,null,V2,u);return{images:I}}};const G1={kernelName:Jm,gradFunc:(r,l,u)=>{const{dims:p}=u,g=ht(p,r.shape);return{x:()=>Oc(r,g)}}};const V1={kernelName:q2,gradFunc:r=>({x:()=>qe(r)})};const Y1={kernelName:Zm,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>yt(Pe(r,ae(ea(u,1.5),2)))}}};const H1={kernelName:Qm,inputsToSave:["condition"],gradFunc:(r,l)=>{const[u]=l;return{condition:()=>Le(qe(u),"float32"),t:()=>ae(r,Le(u,r.dtype)),e:()=>ae(r,Le(uI(u),r.dtype))}}};const q1={kernelName:j2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=mi(u,ke(0)),g=ke(IO),f=ke(xO),I=ae(r,f),S=ae(ae(r,g),Mn(Le(u,"float32")));return Bn(p,I,S)}}}};const j1={kernelName:sf,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,ae(u,Be(ke(1),u)))}}};const K1={kernelName:K2,gradFunc:r=>({x:()=>qe(r)})};const X1={kernelName:tf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(gu(Le(u,"float32")),r)}}};const J1={kernelName:nf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(eI(Le(u,"float32")),r)}}};const Z1={kernelName:ef,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{begin:g,size:f}=u,I=p.shape,[S,x]=Af(p,g,f),v=[];for(let O=0;OfI(r,v)}}};const Q1={kernelName:J2,outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{dim:g}=u,f=!0,I=ae(r,p);return{logits:()=>Be(I,ae(Fe(I,[g],f),p))}}};const eE={kernelName:X2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,JS(u))}}};const NI={kernelName:af,gradFunc:(r,l,u)=>{const{blockShape:p,paddings:g}=u;return{x:()=>ZS(r,p,g)}}};const CI={kernelName:cf,gradFunc:(r,l,u)=>{const{axis:p}=u;return{x:()=>yn(r,p)}}};const tE={kernelName:rf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,ae(ps(Le(u,"float32")),2))}}};const nE={kernelName:Z2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(r,ae(Le(u,"float32"),2))}}};const sE={kernelName:lf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=ke(2),f=()=>ae(r,ae(g,Be(u,p))),I=()=>ae(r,ae(g,Be(p,u)));return{a:f,b:I}}};const iE={kernelName:gf,gradFunc:r=>({x:()=>qe(r)})};const rE={kernelName:hf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,g=it(u.shape,p.shape),f=()=>{let S=r;const x=At(u.shape,g);return x.length>0&&(S=Fe(S,x)),ie(S,u.shape)},I=()=>{let S=r;const x=At(p.shape,g);return x.length>0&&(S=Fe(S,x)),ie(yt(S),p.shape)};return{a:f,b:I}}};const oE={kernelName:of,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,g=p.shape.slice(),{axis:f}=u,I=ht(f,p.shape);I.forEach(v=>{g[v]=1});const S=ie(r,g),x=ae(S,Gi(p.shape,"float32"));return{x:()=>x}}};const 
aE={kernelName:Q2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Pe(r,ut(gu(u)))}}};const cE={kernelName:eR,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>ae(Be(ke(1),ut(u)),r)}}};const lE={kernelName:uf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{reps:g}=u,f=()=>{let I=qe(p);if(p.rank===1)for(let S=0;S{const p=u,{perm:g}=p,f=vc(g);return{x:()=>Wt(r,f)}}};const uE={kernelName:pf,gradFunc:(r,l,u)=>{const p=u,{axis:g}=p;return{value:()=>Vs(r,g)}}};const dE={kernelName:mf,inputsToSave:["segmentIds"],gradFunc:(r,l)=>{const[u]=l,p=()=>F9(r,u);return{x:p}}};function F9(r,l){const u=aI(l,qe(l)),p=oI(r,u);let g=pr(l,ke(0,"int32"));const f=p.rank-g.rank;for(let S=0;S({x:()=>qe(r)})};const _9=[TO,AO,vO,NO,CO,RO,OO,EO,DO,kO,FO,_O,$O,BO,MO,PO,zO,GO,VO,YO,HO,jO,qO,XO,JO,ZO,QO,e1,t1,n1,s1,i1,r1,o1,c1,a1,l1,d1,p1,m1,f1,g1,y1,b1,w1,L1,I1,AI,AI,x1,A1,N1,C1,R1,O1,E1,D1,k1,F1,vI,vI,_1,W1,$1,U1,B1,M1,P1,z1,G1,V1,Y1,H1,q1,j1,K1,X1,J1,Z1,Q1,eE,NI,NI,CI,CI,tE,sE,nE,iE,rE,oE,aE,cE,lE,hE,uE,dE,pE];for(const r of _9)sR(r);function RI(r,l,u=!1){const{Image:p,Canvas:g}=gt.getEnv();if(!(r instanceof p||r instanceof g))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");const f=qo(r),I=l/Math.max(f.height,f.width),S=I*f.width,x=I*f.height,v=bc({width:l,height:l}),O=r instanceof g?r:ou(r),C=Math.abs(S-x)/2,U=u&&S{if(lr(u)){this._imageTensors[p]=u,this._inputDimensions[p]=u.shape;return}if(Ns(u)){const f=u.shape[0];if(f!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${f} passed, but not supported in input array`);this._imageTensors[p]=u,this._inputDimensions[p]=u.shape.slice(1);return}const g=u instanceof gt.getEnv().Canvas?u:ou(u);this._canvases[p]=g,this._inputDimensions[p]=[g.height,g.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return Wi(this.batchSize,0,1).map((r,l)=>this.getReshapedInputDimensions(l))}getInput(r){return this.canvases[r]||this.imageTensors[r]}getInputDimensions(r){return this._inputDimensions[r]}getInputHeight(r){return this._inputDimensions[r][0]}getInputWidth(r){return this._inputDimensions[r][1]}getReshapedInputDimensions(r){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");const l=this.getInputWidth(r),u=this.getInputHeight(r);return pS({width:l,height:u},this.inputSize)}toBatchTensor(r,l=!0){return this._inputSize=r,ER(()=>{const u=Wi(this.batchSize,0,1).map(g=>{const f=this.getInput(g);if(f instanceof In){let I=Ns(f)?f:f.expandDims();return I=yS(I,l),(I.shape[1]!==r||I.shape[2]!==r)&&(I=SO.resizeBilinear(I,[r,r])),I.as3D(r,r,3)}if(f instanceof gt.getEnv().Canvas)return jS.fromPixels(RI(f,r,l));throw new Error(`toBatchTensor - at batchIdx ${g}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${f}`)}),p=Vs(u.map(g=>Le(g,"float32"))).as4D(this.batchSize,r,r,3);return p})}}async function Rt(r){if(r instanceof to)return r;let l=Array.isArray(r)?r:[r];if(!l.length)throw new Error("toNetInput - empty array passed as input");const u=g=>Array.isArray(r)?` at input index ${g}:`:"",p=l.map(Ho);return p.forEach((g,f)=>{if(!wm(g)&&!lr(g)&&!Ns(g))throw typeof l[f]=="string"?new Error(`toNetInput -${u(f)} string passed, but 
could not resolve HTMLElement for element id ${l[f]}`):new Error(`toNetInput -${u(f)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(Ns(g)){const I=g.shape[0];if(I!==1)throw new Error(`toNetInput -${u(f)} tf.Tensor4D with batchSize ${I} passed, but not supported in input array`)}}),await Promise.all(p.map(g=>wm(g)&&AS(g))),new to(p,Array.isArray(r))}async function Dc(r,l){const{Canvas:u}=gt.getEnv();let p=r;if(!(r instanceof u)){const I=await Rt(r);if(I.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");const S=I.getInput(0);p=S instanceof u?S:await NS(S)}const g=Jn(p),f=l.map(I=>I instanceof Ht?I.forSize(p.width,p.height).box.floor():I).map(I=>I.clipAtImageBorders(p.width,p.height));return f.map(({x:I,y:S,width:x,height:v})=>{const O=bc({width:x,height:v});return Jn(O).putImageData(g.getImageData(I,S,x,v),0,0),O})}const Bf=Ke(Xe());async function kc(r,l){if(!lr(r)&&!Ns(r))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(Ns(r)&&r.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return Bf.tidy(()=>{const[u,p,g]=r.shape.slice(Ns(r)?1:0),f=l.map(S=>S instanceof Ht?S.forSize(p,u).box:S).map(S=>S.clipAtImageBorders(p,u)),I=f.map(({x:S,y:x,width:v,height:O})=>Bf.slice3d(r.as3D(u,p,g),[x,S,0],[O,v,g]));return I})}async function sa(r,l){const u=gt.getEnv().fetch,p=await u(r,l);if(!(p.status<400))throw new Error(`failed to fetch: (${p.status}) ${p.statusText}, from url: ${p.url}`);return p}async function W9(r){const l=await sa(r),u=await l.blob();if(!u.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${u.type}, for url: ${l.url}`);return vS(u)}async function OI(r){return(await sa(r)).json()}async function $9(r){return new Float32Array(await(await sa(r)).arrayBuffer())}function Mf(r,l){const u=`${l}-weights_manifest.json`;if(!r)return{modelBaseUri:"",manifestUri:u};if(r==="/")return{modelBaseUri:"/",manifestUri:`/${u}`};const p=r.startsWith("http://")?"http://":r.startsWith("https://")?"https://":"";r=r.replace(p,"");const g=r.split("/").filter(S=>S),f=r.endsWith(".json")?g[g.length-1]:u;let I=p+(r.endsWith(".json")?g.slice(0,g.length-1):g).join("/");return I=r.startsWith("/")?`/${I}`:I,{modelBaseUri:I,manifestUri:I==="/"?`/${f}`:`${I}/${f}`}}const mE=Ke(Xe());async function EI(r,l){const{manifestUri:u,modelBaseUri:p}=Mf(r,l);let g=await OI(u);return mE.io.loadWeights(g,p)}function U9(r,l,u=!1){const{width:p,height:g}=u?qo(l):l;return r.width=p,r.height=g,{width:p,height:g}}const fr=Ke(Xe());class En{constructor(r){this._name=r;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(r){const{obj:l,objProp:u}=this.traversePropertyPath(r);return l[u]}reassignParamFromPath(r,l){const{obj:u,objProp:p}=this.traversePropertyPath(r);u[p].dispose(),u[p]=l}getParamList(){return this._paramMappings.map(({paramPath:r})=>({path:r,tensor:this.getParamFromPath(r)}))}getTrainableParams(){return this.getParamList().filter(r=>r.tensor instanceof fr.Variable)}getFrozenParams(){return this.getParamList().filter(r=>!(r.tensor instanceof fr.Variable))}variable(){this.getFrozenParams().forEach(({path:r,tensor:l})=>{this.reassignParamFromPath(r,l.variable())})}freeze(){this.getTrainableParams().forEach(({path:r,tensor:l})=>{const 
u=fr.tensor(l.dataSync());l.dispose(),this.reassignParamFromPath(r,u)})}dispose(r=!0){this.getParamList().forEach(l=>{if(r&&l.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${l.path}`);l.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:r})=>Array.from(r.dataSync())).reduce((r,l)=>r.concat(l)))}async load(r){if(r instanceof Float32Array){this.extractWeights(r);return}await this.loadFromUri(r)}async loadFromUri(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);const l=await EI(r,this.getDefaultModelName());this.loadFromWeightMap(l)}async loadFromDisk(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);const{readFile:l}=gt.getEnv(),{manifestUri:u,modelBaseUri:p}=Mf(r,this.getDefaultModelName()),g=x=>Promise.all(x.map(v=>l(v).then(O=>O.buffer))),f=fr.io.weightsLoaderFactory(g),I=JSON.parse((await l(u)).toString()),S=await f(I,p);this.loadFromWeightMap(S)}loadFromWeightMap(r){const{paramMappings:l,params:u}=this.extractParamsFromWeigthMap(r);this._paramMappings=l,this._params=u}extractWeights(r){const{paramMappings:l,params:u}=this.extractParams(r);this._paramMappings=l,this._params=u}traversePropertyPath(r){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");const l=r.split("/").reduce((g,f)=>{if(!g.nextObj.hasOwnProperty(f))throw new Error(`traversePropertyPath - object does not have property ${f}, for path ${r}`);return{obj:g.nextObj,objProp:f,nextObj:g.nextObj[f]}},{nextObj:this.params}),{obj:u,objProp:p}=l;if(!u||!p||!(u[p]instanceof fr.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${r}`);return{obj:u,objProp:p}}}const Fc=Ke(Xe());function Qn(r,l,u){return Fc.tidy(()=>{let p=Fc.separableConv2d(r,l.depthwise_filter,l.pointwise_filter,u,"same");return p=Fc.add(p,l.bias),p})}const Dt=Ke(Xe());function Pf(r,l,u=!1){return Dt.tidy(()=>{const p=Dt.relu(u?Dt.add(Dt.conv2d(r,l.conv0.filters,[2,2],"same"),l.conv0.bias):Qn(r,l.conv0,[2,2])),g=Qn(p,l.conv1,[1,1]),f=Dt.relu(Dt.add(p,g)),I=Qn(f,l.conv2,[1,1]);return Dt.relu(Dt.add(p,Dt.add(g,I)))})}function Iu(r,l,u=!1,p=!0){return Dt.tidy(()=>{const g=Dt.relu(u?Dt.add(Dt.conv2d(r,l.conv0.filters,p?[2,2]:[1,1],"same"),l.conv0.bias):Qn(r,l.conv0,p?[2,2]:[1,1])),f=Qn(g,l.conv1,[1,1]),I=Dt.relu(Dt.add(g,f)),S=Qn(I,l.conv2,[1,1]),x=Dt.relu(Dt.add(g,Dt.add(f,S))),v=Qn(x,l.conv3,[1,1]);return Dt.relu(Dt.add(g,Dt.add(f,Dt.add(S,v))))})}const no=Ke(Xe());function ia(r,l,u="same",p=!1){return no.tidy(()=>{const g=no.add(no.conv2d(r,l.filters,[1,1],u),l.bias);return p?no.relu(g):g})}function Pn(r,l){Object.keys(r).forEach(u=>{l.some(p=>p.originalPath===u)||r[u].dispose()})}const zf=Ke(Xe());function _c(r,l){return function(u,p,g,f){const I=zf.tensor4d(r(u*p*g*g),[g,g,u,p]),S=zf.tensor1d(r(p));return l.push({paramPath:`${f}/filters`},{paramPath:`${f}/bias`}),{filters:I,bias:S}}}const Gf=Ke(Xe());function Vf(r,l){return function(u,p,g){const f=Gf.tensor2d(r(u*p),[u,p]),I=Gf.tensor1d(r(p));return l.push({paramPath:`${g}/weights`},{paramPath:`${g}/bias`}),{weights:f,bias:I}}}class DI{constructor(r,l,u){this.depthwise_filter=r;this.pointwise_filter=l;this.bias=u}}const xu=Ke(Xe());function Wc(r,l){return function(u,p,g){const f=xu.tensor4d(r(3*3*u),[3,3,u,1]),I=xu.tensor4d(r(u*p),[1,1,u,p]),S=xu.tensor1d(r(p));return 
l.push({paramPath:`${g}/depthwise_filter`},{paramPath:`${g}/pointwise_filter`},{paramPath:`${g}/bias`}),new DI(f,I,S)}}function $c(r){return function(l){const u=r(`${l}/depthwise_filter`,4),p=r(`${l}/pointwise_filter`,4),g=r(`${l}/bias`,1);return new DI(u,p,g)}}function ms(r,l){return function(u,p,g){const f=r[u];if(!Po(f,p))throw new Error(`expected weightMap[${u}] to be a Tensor${p}D, instead have ${f}`);return l.push({originalPath:u,paramPath:g||u}),f}}function zn(r){let l=r;function u(g){const f=l.slice(0,g);return l=l.slice(g),f}function p(){return l}return{extractWeights:u,getRemainingWeights:p}}function Yf(r,l){const u=_c(r,l),p=Wc(r,l);function g(I,S,x,v=!1){const O=v?u(I,S,3,`${x}/conv0`):p(I,S,`${x}/conv0`),C=p(S,S,`${x}/conv1`),U=p(S,S,`${x}/conv2`);return{conv0:O,conv1:C,conv2:U}}function f(I,S,x,v=!1){const{conv0:O,conv1:C,conv2:U}=g(I,S,x,v),G=p(S,S,`${x}/conv3`);return{conv0:O,conv1:C,conv2:U,conv3:G}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:f}}function fE(r){const l=[],{extractWeights:u,getRemainingWeights:p}=zn(r),{extractDenseBlock4Params:g}=Yf(u,l),f=g(3,32,"dense0",!0),I=g(32,64,"dense1"),S=g(64,128,"dense2"),x=g(128,256,"dense3");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:f,dense1:I,dense2:S,dense3:x}}}function Hf(r){return function(l){const u=r(`${l}/filters`,4),p=r(`${l}/bias`,1);return{filters:u,bias:p}}}function qf(r,l){const u=ms(r,l),p=Hf(u),g=$c(u);function f(S,x=!1){const v=x?p(`${S}/conv0`):g(`${S}/conv0`),O=g(`${S}/conv1`),C=g(`${S}/conv2`);return{conv0:v,conv1:O,conv2:C}}function I(S,x=!1){const v=x?p(`${S}/conv0`):g(`${S}/conv0`),O=g(`${S}/conv1`),C=g(`${S}/conv2`),U=g(`${S}/conv3`);return{conv0:v,conv1:O,conv2:C,conv3:U}}return{extractDenseBlock3Params:f,extractDenseBlock4Params:I}}function gE(r){const l=[],{extractDenseBlock4Params:u}=qf(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2"),dense3:u("dense3")};return Pn(r,l),{params:p,paramMappings:l}}const so=Ke(Xe());class jf extends En{constructor(){super("FaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceFeatureExtractor - load model before inference");return so.tidy(()=>{const u=so.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],g=di(u,p).div(so.scalar(255));let f=Iu(g,l.dense0,!0);return f=Iu(f,l.dense1),f=Iu(f,l.dense2),f=Iu(f,l.dense3),f=so.avgPool(f,[7,7],[2,2],"valid"),f})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(r){return gE(r)}extractParams(r){return fE(r)}}const Uc=Ke(Xe());function Tu(r,l){return Uc.tidy(()=>Uc.add(Uc.matMul(r,l.weights),l.bias))}function yE(r,l,u){const p=[],{extractWeights:g,getRemainingWeights:f}=zn(r),I=Vf(g,p),S=I(l,u,"fc");if(f().length!==0)throw new Error(`weights remaing after extract: ${f().length}`);return{paramMappings:p,params:{fc:S}}}function bE(r){const l=[],u=ms(r,l);function p(f){const I=u(`${f}/weights`,2),S=u(`${f}/bias`,1);return{weights:I,bias:S}}const g={fc:p("fc")};return Pn(r,l),{params:g,paramMappings:l}}function Kf(r){const l={},u={};return Object.keys(r).forEach(p=>{const g=p.startsWith("fc")?u:l;g[p]=r[p]}),{featureExtractorMap:l,classifierMap:u}}const wE=Ke(Xe());class Xf extends En{constructor(r,l){super(r);this._faceFeatureExtractor=l}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before 
inference`);return wE.tidy(()=>{const u=r instanceof to?this.faceFeatureExtractor.forwardInput(r):r;return Tu(u.as2D(u.shape[0],-1),l.fc)})}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return yE(r,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Kf(r);return this.faceFeatureExtractor.loadFromWeightMap(l),bE(u)}extractParams(r){const l=this.getClassifierChannelsIn(),u=this.getClassifierChannelsOut(),p=u*l+u,g=r.slice(0,r.length-p),f=r.slice(r.length-p);return this.faceFeatureExtractor.extractWeights(g),this.extractClassifierParams(f)}}const kI=["neutral","happy","sad","angry","fearful","disgusted","surprised"];class ra{constructor(r){if(r.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${r.length}`);kI.forEach((l,u)=>{this[l]=r[u]})}asSortedArray(){return kI.map(r=>({expression:r,probability:this[r]})).sort((r,l)=>l.probability-r.probability)}}const Bc=Ke(Xe());class FI extends Xf{constructor(r=new jf){super("FaceExpressionNet",r)}forwardInput(r){return Bc.tidy(()=>Bc.softmax(this.runNet(r)))}async forward(r){return this.forwardInput(await Rt(r))}async predictExpressions(r){const l=await Rt(r),u=await this.forwardInput(l),p=await Promise.all(Bc.unstack(u).map(async f=>{const I=await f.data();return f.dispose(),I}));u.dispose();const g=p.map(f=>new ra(f));return l.isBatchInput?g:g[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}}function _I(r){return r.expressions instanceof ra}function Jf(r,l){const u={expressions:l};return Object.assign({},r,u)}function B9(r,l,u=.1,p){const g=Array.isArray(l)?l:[l];g.forEach(f=>{const I=f instanceof ra?f:_I(f)?f.expressions:void 0;if(!I)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");const S=I.asSortedArray(),x=S.filter(C=>C.probability>u),v=Ui(f)?f.detection.box.bottomLeft:p||new Je(0,0),O=new yc(x.map(C=>`${C.expression} (${zo(C.probability)})`),v);O.draw(r)})}function oa(r){return Ui(r)&&r.landmarks instanceof Gs&&r.unshiftedLandmarks instanceof Gs&&r.alignedRect instanceof Ht}function Mc(r,l){const{box:u}=r.detection,p=l.shiftBy(u.x,u.y),g=p.align(),{imageDims:f}=r.detection,I=new Ht(r.detection.score,g.rescale(f.reverse()),f),S={landmarks:p,unshiftedLandmarks:l,alignedRect:I};return Object.assign({},r,S)}class LE{constructor(r={}){const{drawLines:l=!0,drawPoints:u=!0,lineWidth:p,lineColor:g,pointSize:f,pointColor:I}=r;this.drawLines=l,this.drawPoints=u,this.lineWidth=p||1,this.pointSize=f||2,this.lineColor=g||"rgba(0, 255, 255, 1)",this.pointColor=I||"rgba(255, 0, 255, 1)"}}class SE{constructor(r,l={}){this.faceLandmarks=r,this.options=new LE(l)}draw(r){const l=Jn(r),{drawLines:u,drawPoints:p,lineWidth:g,lineColor:f,pointSize:I,pointColor:S}=this.options;if(u&&this.faceLandmarks instanceof iu&&(l.strokeStyle=f,l.lineWidth=g,cr(l,this.faceLandmarks.getJawOutline()),cr(l,this.faceLandmarks.getLeftEyeBrow()),cr(l,this.faceLandmarks.getRightEyeBrow()),cr(l,this.faceLandmarks.getNose()),cr(l,this.faceLandmarks.getLeftEye(),!0),cr(l,this.faceLandmarks.getRightEye(),!0),cr(l,this.faceLandmarks.getMouth(),!0)),p){l.strokeStyle=S,l.fillStyle=S;const 
x=v=>{l.beginPath(),l.arc(v.x,v.y,I,0,2*Math.PI),l.fill()};this.faceLandmarks.positions.forEach(x)}}}function M9(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const g=p instanceof Gs?p:oa(p)?p.landmarks:void 0;if(!g)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new SE(g).draw(r)})}const WI={};hm(WI,{AnchorPosition:()=>Bi,DrawBox:()=>TS,DrawBoxOptions:()=>NC,DrawFaceLandmarks:()=>SE,DrawFaceLandmarksOptions:()=>LE,DrawTextField:()=>yc,DrawTextFieldOptions:()=>ym,drawContour:()=>cr,drawDetections:()=>KX,drawFaceExpressions:()=>B9,drawFaceLandmarks:()=>M9});function P9(r,l){const u=_c(r,l),p=Wc(r,l);function g(I,S,x){const v=p(I,S,`${x}/separable_conv0`),O=p(S,S,`${x}/separable_conv1`),C=u(I,S,1,`${x}/expansion_conv`);return{separable_conv0:v,separable_conv1:O,expansion_conv:C}}function f(I,S){const x=p(I,I,`${S}/separable_conv0`),v=p(I,I,`${S}/separable_conv1`),O=p(I,I,`${S}/separable_conv2`);return{separable_conv0:x,separable_conv1:v,separable_conv2:O}}return{extractConvParams:u,extractSeparableConvParams:p,extractReductionBlockParams:g,extractMainBlockParams:f}}function IE(r,l){const u=[],{extractWeights:p,getRemainingWeights:g}=zn(r),{extractConvParams:f,extractSeparableConvParams:I,extractReductionBlockParams:S,extractMainBlockParams:x}=P9(p,u),v=f(3,32,3,"entry_flow/conv_in"),O=S(32,64,"entry_flow/reduction_block_0"),C=S(64,128,"entry_flow/reduction_block_1"),U={conv_in:v,reduction_block_0:O,reduction_block_1:C},G={};Wi(l,0,1).forEach(ge=>{G[`main_block_${ge}`]=x(128,`middle_flow/main_block_${ge}`)});const ne=S(128,256,"exit_flow/reduction_block"),te=I(256,512,"exit_flow/separable_conv"),oe={reduction_block:ne,separable_conv:te};if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:u,params:{entry_flow:U,middle_flow:G,exit_flow:oe}}}function z9(r,l){const u=ms(r,l),p=Hf(u),g=$c(u);function f(S){const x=g(`${S}/separable_conv0`),v=g(`${S}/separable_conv1`),O=p(`${S}/expansion_conv`);return{separable_conv0:x,separable_conv1:v,expansion_conv:O}}function I(S){const x=g(`${S}/separable_conv0`),v=g(`${S}/separable_conv1`),O=g(`${S}/separable_conv2`);return{separable_conv0:x,separable_conv1:v,separable_conv2:O}}return{extractConvParams:p,extractSeparableConvParams:g,extractReductionBlockParams:f,extractMainBlockParams:I}}function xE(r,l){const u=[],{extractConvParams:p,extractSeparableConvParams:g,extractReductionBlockParams:f,extractMainBlockParams:I}=z9(r,u),S=p("entry_flow/conv_in"),x=f("entry_flow/reduction_block_0"),v=f("entry_flow/reduction_block_1"),O={conv_in:S,reduction_block_0:x,reduction_block_1:v},C={};Wi(l,0,1).forEach(te=>{C[`main_block_${te}`]=I(`middle_flow/main_block_${te}`)});const U=f("exit_flow/reduction_block"),G=g("exit_flow/separable_conv"),ne={reduction_block:U,separable_conv:G};return Pn(r,u),{params:{entry_flow:O,middle_flow:C,exit_flow:ne},paramMappings:u}}const tn=Ke(Xe());function TE(r,l,u){return tn.add(tn.conv2d(r,l.filters,u,"same"),l.bias)}function $I(r,l,u=!0){let p=u?tn.relu(r):r;return p=Qn(p,l.separable_conv0,[1,1]),p=Qn(tn.relu(p),l.separable_conv1,[1,1]),p=tn.maxPool(p,[3,3],[2,2],"same"),p=tn.add(p,TE(r,l.expansion_conv,[2,2])),p}function G9(r,l){let u=Qn(tn.relu(r),l.separable_conv0,[1,1]);return u=Qn(tn.relu(u),l.separable_conv1,[1,1]),u=Qn(tn.relu(u),l.separable_conv2,[1,1]),u=tn.add(u,r),u}class AE extends En{constructor(r){super("TinyXception");this._numMainBlocks=r}forwardInput(r){const{params:l}=this;if(!l)throw new 
Error("TinyXception - load model before inference");return tn.tidy(()=>{const u=tn.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],g=di(u,p).div(tn.scalar(256));let f=tn.relu(TE(g,l.entry_flow.conv_in,[2,2]));return f=$I(f,l.entry_flow.reduction_block_0,!1),f=$I(f,l.entry_flow.reduction_block_1),Wi(this._numMainBlocks,0,1).forEach(I=>{f=G9(f,l.middle_flow[`main_block_${I}`])}),f=$I(f,l.exit_flow.reduction_block),f=tn.relu(Qn(f,l.exit_flow.separable_conv,[1,1])),f})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(r){return xE(r,this._numMainBlocks)}extractParams(r){return IE(r,this._numMainBlocks)}}function vE(r){const l=[],{extractWeights:u,getRemainingWeights:p}=zn(r),g=Vf(u,l),f=g(512,1,"fc/age"),I=g(512,2,"fc/gender");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{fc:{age:f,gender:I}}}}function NE(r){const l=[],u=ms(r,l);function p(f){const I=u(`${f}/weights`,2),S=u(`${f}/bias`,1);return{weights:I,bias:S}}const g={fc:{age:p("fc/age"),gender:p("fc/gender")}};return Pn(r,l),{params:g,paramMappings:l}}var gr;(function(r){r.FEMALE="female",r.MALE="male"})(gr||(gr={}));const Vi=Ke(Xe());class UI extends En{constructor(r=new AE(2)){super("AgeGenderNet");this._faceFeatureExtractor=r}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return Vi.tidy(()=>{const u=r instanceof to?this.faceFeatureExtractor.forwardInput(r):r,p=Vi.avgPool(u,[7,7],[2,2],"valid").as2D(u.shape[0],-1),g=Tu(p,l.fc.age).as1D(),f=Tu(p,l.fc.gender);return{age:g,gender:f}})}forwardInput(r){return Vi.tidy(()=>{const{age:l,gender:u}=this.runNet(r);return{age:l,gender:Vi.softmax(u)}})}async forward(r){return this.forwardInput(await Rt(r))}async predictAgeAndGender(r){const l=await Rt(r),u=await this.forwardInput(l),p=Vi.unstack(u.age),g=Vi.unstack(u.gender),f=p.map((S,x)=>({ageTensor:S,genderTensor:g[x]})),I=await Promise.all(f.map(async({ageTensor:S,genderTensor:x})=>{const v=(await S.data())[0],O=(await x.data())[0],C=O>.5,U=C?gr.MALE:gr.FEMALE,G=C?O:1-O;return S.dispose(),x.dispose(),{age:v,gender:U,genderProbability:G}}));return u.age.dispose(),u.gender.dispose(),l.isBatchInput?I:I[0]}getDefaultModelName(){return"age_gender_model"}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return vE(r)}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Kf(r);return this.faceFeatureExtractor.loadFromWeightMap(l),NE(u)}extractParams(r){const l=512*1+1+(512*2+2),u=r.slice(0,r.length-l),p=r.slice(r.length-l);return this.faceFeatureExtractor.extractWeights(u),this.extractClassifierParams(p)}}const fs=Ke(Xe());class Zf extends Xf{postProcess(r,l,u){const p=u.map(({width:f,height:I})=>{const S=l/Math.max(I,f);return{width:f*S,height:I*S}}),g=p.length;return fs.tidy(()=>{const f=(O,C)=>fs.stack([fs.fill([68],O,"float32"),fs.fill([68],C,"float32")],1).as2D(1,136).as1D(),I=(O,C)=>{const{width:U,height:G}=p[O];return C(U,G)?Math.abs(U-G)/2:0},S=O=>I(O,(C,U)=>CI(O,(C,U)=>Uf(S(C),x(C))))).div(fs.stack(Array.from(Array(g),(O,C)=>f(p[C].width,p[C].height))));return v})}forwardInput(r){return fs.tidy(()=>{const l=this.runNet(r);return 
this.postProcess(l,r.inputSize,r.inputDimensions.map(([u,p])=>({height:u,width:p})))})}async forward(r){return this.forwardInput(await Rt(r))}async detectLandmarks(r){const l=await Rt(r),u=fs.tidy(()=>fs.unstack(this.forwardInput(l))),p=await Promise.all(u.map(async(g,f)=>{const I=Array.from(await g.data()),S=I.filter((v,O)=>dm(O)),x=I.filter((v,O)=>!dm(O));return new iu(Array(68).fill(0).map((v,O)=>new Je(S[O],x[O])),{height:l.getInputHeight(f),width:l.getInputWidth(f)})}));return u.forEach(g=>g.dispose()),l.isBatchInput?p:p[0]}getClassifierChannelsOut(){return 136}}class Au extends Zf{constructor(r=new jf){super("FaceLandmark68Net",r)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}}function CE(r){const l=[],{extractDenseBlock3Params:u}=qf(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2")};return Pn(r,l),{params:p,paramMappings:l}}function RE(r){const l=[],{extractWeights:u,getRemainingWeights:p}=zn(r),{extractDenseBlock3Params:g}=Yf(u,l),f=g(3,32,"dense0",!0),I=g(32,64,"dense1"),S=g(64,128,"dense2");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:f,dense1:I,dense2:S}}}const io=Ke(Xe());class OE extends En{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyFaceFeatureExtractor - load model before inference");return io.tidy(()=>{const u=io.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],g=di(u,p).div(io.scalar(255));let f=Pf(g,l.dense0,!0);return f=Pf(f,l.dense1),f=Pf(f,l.dense2),f=io.avgPool(f,[14,14],[2,2],"valid"),f})}async forward(r){return this.forwardInput(await Rt(r))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(r){return CE(r)}extractParams(r){return RE(r)}}class BI extends Zf{constructor(r=new OE){super("FaceLandmark68TinyNet",r)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}}class V9 extends Au{}const Qf=Ke(Xe());function EE(r,l){return Qf.add(Qf.mul(r,l.weights),l.biases)}const Pc=Ke(Xe());function MI(r,l,u,p,g="same"){const{filters:f,bias:I}=l.conv;let S=Pc.conv2d(r,f,u,g);return S=Pc.add(S,I),S=EE(S,l.scale),p?Pc.relu(S):S}function DE(r,l){return MI(r,l,[1,1],!0)}function PI(r,l){return MI(r,l,[1,1],!1)}function eg(r,l){return MI(r,l,[2,2],!0,"valid")}const gs=Ke(Xe());function Y9(r,l){function u(S,x,v){const O=r(S),C=O.length/(x*v*v);if(dS(C))throw new Error(`depth has to be an integer: ${C}, weights.length: ${O.length}, numFilters: ${x}, filterSize: ${v}`);return gs.tidy(()=>gs.transpose(gs.tensor4d(O,[x,C,v,v]),[2,3,1,0]))}function p(S,x,v,O){const C=u(S,x,v),U=gs.tensor1d(r(x));return l.push({paramPath:`${O}/filters`},{paramPath:`${O}/bias`}),{filters:C,bias:U}}function g(S,x){const v=gs.tensor1d(r(S)),O=gs.tensor1d(r(S));return l.push({paramPath:`${x}/weights`},{paramPath:`${x}/biases`}),{weights:v,biases:O}}function f(S,x,v,O){const C=p(S,x,v,`${O}/conv`),U=g(x,`${O}/scale`);return{conv:C,scale:U}}function I(S,x,v,O,C=!1){const U=f((C?.5:1)*S,x,v,`${O}/conv1`),G=f(S,x,v,`${O}/conv2`);return{conv1:U,conv2:G}}return{extractConvLayerParams:f,extractResidualLayerParams:I}}function 
kE(r){const{extractWeights:l,getRemainingWeights:u}=zn(r),p=[],{extractConvLayerParams:g,extractResidualLayerParams:f}=Y9(l,p),I=g(4704,32,7,"conv32_down"),S=f(9216,32,3,"conv32_1"),x=f(9216,32,3,"conv32_2"),v=f(9216,32,3,"conv32_3"),O=f(36864,64,3,"conv64_down",!0),C=f(36864,64,3,"conv64_1"),U=f(36864,64,3,"conv64_2"),G=f(36864,64,3,"conv64_3"),ne=f(147456,128,3,"conv128_down",!0),te=f(147456,128,3,"conv128_1"),oe=f(147456,128,3,"conv128_2"),ge=f(589824,256,3,"conv256_down",!0),fe=f(589824,256,3,"conv256_1"),Ae=f(589824,256,3,"conv256_2"),Te=f(589824,256,3,"conv256_down_out"),Ve=gs.tidy(()=>gs.transpose(gs.tensor2d(l(256*128),[128,256]),[1,0]));if(p.push({paramPath:"fc"}),u().length!==0)throw new Error(`weights remaing after extract: ${u().length}`);const rt={conv32_down:I,conv32_1:S,conv32_2:x,conv32_3:v,conv64_down:O,conv64_1:C,conv64_2:U,conv64_3:G,conv128_down:ne,conv128_1:te,conv128_2:oe,conv256_down:ge,conv256_1:fe,conv256_2:Ae,conv256_down_out:Te,fc:Ve};return{params:rt,paramMappings:p}}function H9(r,l){const u=ms(r,l);function p(I){const S=u(`${I}/scale/weights`,1),x=u(`${I}/scale/biases`,1);return{weights:S,biases:x}}function g(I){const S=u(`${I}/conv/filters`,4),x=u(`${I}/conv/bias`,1),v=p(I);return{conv:{filters:S,bias:x},scale:v}}function f(I){return{conv1:g(`${I}/conv1`),conv2:g(`${I}/conv2`)}}return{extractConvLayerParams:g,extractResidualLayerParams:f}}function FE(r){const l=[],{extractConvLayerParams:u,extractResidualLayerParams:p}=H9(r,l),g=u("conv32_down"),f=p("conv32_1"),I=p("conv32_2"),S=p("conv32_3"),x=p("conv64_down"),v=p("conv64_1"),O=p("conv64_2"),C=p("conv64_3"),U=p("conv128_down"),G=p("conv128_1"),ne=p("conv128_2"),te=p("conv256_down"),oe=p("conv256_1"),ge=p("conv256_2"),fe=p("conv256_down_out"),Ae=r.fc;if(l.push({originalPath:"fc",paramPath:"fc"}),!uS(Ae))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${Ae}`);const Te={conv32_down:g,conv32_1:f,conv32_2:I,conv32_3:S,conv64_down:x,conv64_1:v,conv64_2:O,conv64_3:C,conv128_down:U,conv128_1:G,conv128_2:ne,conv256_down:te,conv256_1:oe,conv256_2:ge,conv256_down_out:fe,fc:Ae};return Pn(r,l),{params:Te,paramMappings:l}}const Gn=Ke(Xe());function fi(r,l){let u=DE(r,l.conv1);return u=PI(u,l.conv2),u=Gn.add(u,r),u=Gn.relu(u),u}function vu(r,l){let u=eg(r,l.conv1);u=PI(u,l.conv2);let p=Gn.avgPool(r,2,2,"valid");const g=Gn.zeros(p.shape),f=p.shape[3]!==u.shape[3],I=p.shape[1]!==u.shape[1]||p.shape[2]!==u.shape[2];if(I){const S=[...u.shape];S[1]=1;const x=Gn.zeros(S);u=Gn.concat([u,x],1);const v=[...u.shape];v[2]=1;const O=Gn.zeros(v);u=Gn.concat([u,O],2)}return p=f?Gn.concat([p,g],3):p,u=Gn.add(p,u),u=Gn.relu(u),u}const Ds=Ke(Xe());class Nu extends En{constructor(){super("FaceRecognitionNet")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceRecognitionNet - load model before inference");return Ds.tidy(()=>{const u=Ds.cast(r.toBatchTensor(150,!0),"float32"),p=[122.782,117.001,104.298],g=di(u,p).div(Ds.scalar(256));let f=eg(g,l.conv32_down);f=Ds.maxPool(f,3,2,"valid"),f=fi(f,l.conv32_1),f=fi(f,l.conv32_2),f=fi(f,l.conv32_3),f=vu(f,l.conv64_down),f=fi(f,l.conv64_1),f=fi(f,l.conv64_2),f=fi(f,l.conv64_3),f=vu(f,l.conv128_down),f=fi(f,l.conv128_1),f=fi(f,l.conv128_2),f=vu(f,l.conv256_down),f=fi(f,l.conv256_1),f=fi(f,l.conv256_2),f=vu(f,l.conv256_down_out);const I=f.mean([1,2]),S=Ds.matMul(I,l.fc);return S})}async forward(r){return this.forwardInput(await Rt(r))}async computeFaceDescriptor(r){const l=await Rt(r),u=Ds.tidy(()=>Ds.unstack(this.forwardInput(l))),p=await 
Promise.all(u.map(g=>g.data()));return u.forEach(g=>g.dispose()),l.isBatchInput?p:p[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(r){return FE(r)}extractParams(r){return kE(r)}}function q9(r){const l=new Nu;return l.extractWeights(r),l}function tg(r,l){const u={descriptor:l};return Object.assign({},r,u)}function j9(r){return typeof r.age=="number"}function ng(r,l){const u={age:l};return Object.assign({},r,u)}function K9(r){return(r.gender===gr.MALE||r.gender===gr.FEMALE)&&fc(r.genderProbability)}function sg(r,l,u){const p={gender:l,genderProbability:u};return Object.assign({},r,p)}const gi=Ke(Xe());function X9(r,l){function u(x,v){const O=gi.tensor4d(r(3*3*x),[3,3,x,1]),C=gi.tensor1d(r(x)),U=gi.tensor1d(r(x)),G=gi.tensor1d(r(x)),ne=gi.tensor1d(r(x));return l.push({paramPath:`${v}/filters`},{paramPath:`${v}/batch_norm_scale`},{paramPath:`${v}/batch_norm_offset`},{paramPath:`${v}/batch_norm_mean`},{paramPath:`${v}/batch_norm_variance`}),{filters:O,batch_norm_scale:C,batch_norm_offset:U,batch_norm_mean:G,batch_norm_variance:ne}}function p(x,v,O,C,U){const G=gi.tensor4d(r(x*v*O*O),[O,O,x,v]),ne=gi.tensor1d(r(v));return l.push({paramPath:`${C}/filters`},{paramPath:`${C}/${U?"batch_norm_offset":"bias"}`}),{filters:G,bias:ne}}function g(x,v,O,C){const{filters:U,bias:G}=p(x,v,O,C,!0);return{filters:U,batch_norm_offset:G}}function f(x,v,O){const C=u(x,`${O}/depthwise_conv`),U=g(x,v,1,`${O}/pointwise_conv`);return{depthwise_conv:C,pointwise_conv:U}}function I(){const x=g(3,32,3,"mobilenetv1/conv_0"),v=f(32,64,"mobilenetv1/conv_1"),O=f(64,128,"mobilenetv1/conv_2"),C=f(128,128,"mobilenetv1/conv_3"),U=f(128,256,"mobilenetv1/conv_4"),G=f(256,256,"mobilenetv1/conv_5"),ne=f(256,512,"mobilenetv1/conv_6"),te=f(512,512,"mobilenetv1/conv_7"),oe=f(512,512,"mobilenetv1/conv_8"),ge=f(512,512,"mobilenetv1/conv_9"),fe=f(512,512,"mobilenetv1/conv_10"),Ae=f(512,512,"mobilenetv1/conv_11"),Te=f(512,1024,"mobilenetv1/conv_12"),Ve=f(1024,1024,"mobilenetv1/conv_13");return{conv_0:x,conv_1:v,conv_2:O,conv_3:C,conv_4:U,conv_5:G,conv_6:ne,conv_7:te,conv_8:oe,conv_9:ge,conv_10:fe,conv_11:Ae,conv_12:Te,conv_13:Ve}}function S(){const 
x=g(1024,256,1,"prediction_layer/conv_0"),v=g(256,512,3,"prediction_layer/conv_1"),O=g(512,128,1,"prediction_layer/conv_2"),C=g(128,256,3,"prediction_layer/conv_3"),U=g(256,128,1,"prediction_layer/conv_4"),G=g(128,256,3,"prediction_layer/conv_5"),ne=g(256,64,1,"prediction_layer/conv_6"),te=g(64,128,3,"prediction_layer/conv_7"),oe=p(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),ge=p(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),fe=p(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),Ae=p(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),Te=p(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Ve=p(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),rt=p(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),vt=p(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),$t=p(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Kt=p(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),Dn=p(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),Tn=p(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),An={box_encoding_predictor:oe,class_predictor:ge},Ks={box_encoding_predictor:fe,class_predictor:Ae},Li={box_encoding_predictor:Te,class_predictor:Ve},Xs={box_encoding_predictor:rt,class_predictor:vt},ua={box_encoding_predictor:$t,class_predictor:Kt},Xc={box_encoding_predictor:Dn,class_predictor:Tn};return{conv_0:x,conv_1:v,conv_2:O,conv_3:C,conv_4:U,conv_5:G,conv_6:ne,conv_7:te,box_predictor_0:An,box_predictor_1:Ks,box_predictor_2:Li,box_predictor_3:Xs,box_predictor_4:ua,box_predictor_5:Xc}}return{extractMobilenetV1Params:I,extractPredictionLayerParams:S}}function _E(r){const l=[],{extractWeights:u,getRemainingWeights:p}=zn(r),{extractMobilenetV1Params:g,extractPredictionLayerParams:f}=X9(u,l),I=g(),S=f(),x=gi.tensor3d(u(5118*4),[1,5118,4]),v={extra_dim:x};if(l.push({paramPath:"output_layer/extra_dim"}),p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{params:{mobilenetv1:I,prediction_layer:S,output_layer:v},paramMappings:l}}function J9(r,l){const u=ms(r,l);function p(v,O,C){const U=u(`${v}/Conv2d_${O}_pointwise/weights`,4,`${C}/filters`),G=u(`${v}/Conv2d_${O}_pointwise/convolution_bn_offset`,1,`${C}/batch_norm_offset`);return{filters:U,batch_norm_offset:G}}function g(v){const O=`mobilenetv1/conv_${v}`,C=`MobilenetV1/Conv2d_${v}_depthwise`,U=`${O}/depthwise_conv`,G=`${O}/pointwise_conv`,ne=u(`${C}/depthwise_weights`,4,`${U}/filters`),te=u(`${C}/BatchNorm/gamma`,1,`${U}/batch_norm_scale`),oe=u(`${C}/BatchNorm/beta`,1,`${U}/batch_norm_offset`),ge=u(`${C}/BatchNorm/moving_mean`,1,`${U}/batch_norm_mean`),fe=u(`${C}/BatchNorm/moving_variance`,1,`${U}/batch_norm_variance`);return{depthwise_conv:{filters:ne,batch_norm_scale:te,batch_norm_offset:oe,batch_norm_mean:ge,batch_norm_variance:fe},pointwise_conv:p("MobilenetV1",v,G)}}function f(){return{conv_0:p("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:g(1),conv_2:g(2),conv_3:g(3),conv_4:g(4),conv_5:g(5),conv_6:g(6),conv_7:g(7),conv_8:g(8),conv_9:g(9),conv_10:g(10),conv_11:g(11),conv_12:g(12),conv_13:g(13)}}function I(v,O){const C=u(`${v}/weights`,4,`${O}/filters`),U=u(`${v}/biases`,1,`${O}/bias`);return{filters:C,bias:U}}function S(v){const 
O=I(`Prediction/BoxPredictor_${v}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${v}/box_encoding_predictor`),C=I(`Prediction/BoxPredictor_${v}/ClassPredictor`,`prediction_layer/box_predictor_${v}/class_predictor`);return{box_encoding_predictor:O,class_predictor:C}}function x(){return{conv_0:p("Prediction",0,"prediction_layer/conv_0"),conv_1:p("Prediction",1,"prediction_layer/conv_1"),conv_2:p("Prediction",2,"prediction_layer/conv_2"),conv_3:p("Prediction",3,"prediction_layer/conv_3"),conv_4:p("Prediction",4,"prediction_layer/conv_4"),conv_5:p("Prediction",5,"prediction_layer/conv_5"),conv_6:p("Prediction",6,"prediction_layer/conv_6"),conv_7:p("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:S(0),box_predictor_1:S(1),box_predictor_2:S(2),box_predictor_3:S(3),box_predictor_4:S(4),box_predictor_5:S(5)}}return{extractMobilenetV1Params:f,extractPredictionLayerParams:x}}function WE(r){const l=[],{extractMobilenetV1Params:u,extractPredictionLayerParams:p}=J9(r,l),g=r["Output/extra_dim"];if(l.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!lr(g))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${g}`);const f={mobilenetv1:u(),prediction_layer:p(),output_layer:{extra_dim:g}};return Pn(r,l),{params:f,paramMappings:l}}const ro=Ke(Xe());function Hs(r,l,u){return ro.tidy(()=>{let p=ro.conv2d(r,l.filters,u,"same");return p=ro.add(p,l.batch_norm_offset),ro.clipByValue(p,0,6)})}const yr=Ke(Xe()),Z9=.0010000000474974513;function Q9(r,l,u){return yr.tidy(()=>{let p=yr.depthwiseConv2d(r,l.filters,u,"same");return p=yr.batchNorm(p,l.batch_norm_mean,l.batch_norm_variance,l.batch_norm_offset,l.batch_norm_scale,Z9),yr.clipByValue(p,0,6)})}function eZ(r){return[2,4,6,12].some(l=>l===r)?[2,2]:[1,1]}function $E(r,l){return yr.tidy(()=>{let u,p=Hs(r,l.conv_0,[2,2]);const g=[l.conv_1,l.conv_2,l.conv_3,l.conv_4,l.conv_5,l.conv_6,l.conv_7,l.conv_8,l.conv_9,l.conv_10,l.conv_11,l.conv_12,l.conv_13];if(g.forEach((f,I)=>{const S=I+1,x=eZ(S);p=Q9(p,f.depthwise_conv,x),p=Hs(p,f.pointwise_conv,[1,1]),S===11&&(u=p)}),u===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:p,conv11:u}})}function UE(r,l,u,p,g){const f=r.shape[0],I=Math.min(u,f),S=l.map((O,C)=>({score:O,boxIndex:C})).filter(O=>O.score>g).sort((O,C)=>C.score-O.score),x=O=>O<=p?1:0,v=[];return S.forEach(O=>{if(v.length>=I)return;const C=O.score;for(let U=v.length-1;U>=0;--U){const G=tZ(r,O.boxIndex,v[U]);if(G===0)continue;if(O.score*=x(G),O.score<=g)break}C===O.score&&v.push(O.boxIndex)}),v}function tZ(r,l,u){const p=r.arraySync(),g=Math.min(p[l][0],p[l][2]),f=Math.min(p[l][1],p[l][3]),I=Math.max(p[l][0],p[l][2]),S=Math.max(p[l][1],p[l][3]),x=Math.min(p[u][0],p[u][2]),v=Math.min(p[u][1],p[u][3]),O=Math.max(p[u][0],p[u][2]),C=Math.max(p[u][1],p[u][3]),U=(I-g)*(S-f),G=(O-x)*(C-v);if(U<=0||G<=0)return 0;const ne=Math.max(g,x),te=Math.max(f,v),oe=Math.min(I,O),ge=Math.min(S,C),fe=Math.max(oe-ne,0)*Math.max(ge-te,0);return fe/(U+G-fe)}const De=Ke(Xe());function nZ(r){const l=De.unstack(De.transpose(r,[1,0])),u=[De.sub(l[2],l[0]),De.sub(l[3],l[1])],p=[De.add(l[0],De.div(u[0],De.scalar(2))),De.add(l[1],De.div(u[1],De.scalar(2)))];return{sizes:u,centers:p}}function 
sZ(r,l){const{sizes:u,centers:p}=nZ(r),g=De.unstack(De.transpose(l,[1,0])),f=De.div(De.mul(De.exp(De.div(g[2],De.scalar(5))),u[0]),De.scalar(2)),I=De.add(De.mul(De.div(g[0],De.scalar(10)),u[0]),p[0]),S=De.div(De.mul(De.exp(De.div(g[3],De.scalar(5))),u[1]),De.scalar(2)),x=De.add(De.mul(De.div(g[1],De.scalar(10)),u[1]),p[1]);return De.transpose(De.stack([De.sub(I,f),De.sub(x,S),De.add(I,f),De.add(x,S)]),[1,0])}function BE(r,l,u){return De.tidy(()=>{const p=r.shape[0];let g=sZ(De.reshape(De.tile(u.extra_dim,[p,1,1]),[-1,4]),De.reshape(r,[-1,4]));g=De.reshape(g,[p,g.shape[0]/p,4]);const f=De.sigmoid(De.slice(l,[0,0,1],[-1,-1,-1]));let I=De.slice(f,[0,0,0],[-1,-1,1]);I=De.reshape(I,[p,I.shape[1]]);const S=De.unstack(g),x=De.unstack(I);return{boxes:S,scores:x}})}const Cu=Ke(Xe());function aa(r,l){return Cu.tidy(()=>{const u=r.shape[0],p=Cu.reshape(ia(r,l.box_encoding_predictor),[u,-1,1,4]),g=Cu.reshape(ia(r,l.class_predictor),[u,-1,3]);return{boxPredictionEncoding:p,classPrediction:g}})}const Ru=Ke(Xe());function ME(r,l,u){return Ru.tidy(()=>{const p=Hs(r,u.conv_0,[1,1]),g=Hs(p,u.conv_1,[2,2]),f=Hs(g,u.conv_2,[1,1]),I=Hs(f,u.conv_3,[2,2]),S=Hs(I,u.conv_4,[1,1]),x=Hs(S,u.conv_5,[2,2]),v=Hs(x,u.conv_6,[1,1]),O=Hs(v,u.conv_7,[2,2]),C=aa(l,u.box_predictor_0),U=aa(r,u.box_predictor_1),G=aa(g,u.box_predictor_2),ne=aa(I,u.box_predictor_3),te=aa(x,u.box_predictor_4),oe=aa(O,u.box_predictor_5),ge=Ru.concat([C.boxPredictionEncoding,U.boxPredictionEncoding,G.boxPredictionEncoding,ne.boxPredictionEncoding,te.boxPredictionEncoding,oe.boxPredictionEncoding],1),fe=Ru.concat([C.classPrediction,U.classPrediction,G.classPrediction,ne.classPrediction,te.classPrediction,oe.classPrediction],1);return{boxPredictions:ge,classPredictions:fe}})}class yi{constructor({minConfidence:r,maxResults:l}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=r||.5,this._maxResults=l||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}}const bi=Ke(Xe());class zc extends En{constructor(){super("SsdMobilenetv1")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("SsdMobilenetv1 - load model before inference");return bi.tidy(()=>{const u=bi.cast(r.toBatchTensor(512,!1),"float32"),p=bi.sub(bi.mul(u,bi.scalar(.007843137718737125)),bi.scalar(1)),g=$E(p,l.mobilenetv1),{boxPredictions:f,classPredictions:I}=ME(g.out,g.conv11,l.prediction_layer);return BE(f,I,l.output_layer)})}async forward(r){return this.forwardInput(await Rt(r))}async locateFaces(r,l={}){const{maxResults:u,minConfidence:p}=new yi(l),g=await Rt(r),{boxes:f,scores:I}=this.forwardInput(g),S=f[0],x=I[0];for(let fe=1;fe{const[Ae,Te]=[Math.max(0,oe[fe][0]),Math.min(1,oe[fe][2])].map(vt=>vt*te),[Ve,rt]=[Math.max(0,oe[fe][1]),Math.min(1,oe[fe][3])].map(vt=>vt*ne);return new Ht(v[fe],new su(Ve,Ae,rt-Ve,Te-Ae),{height:g.getInputHeight(0),width:g.getInputWidth(0)})});return S.dispose(),x.dispose(),ge}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(r){return WE(r)}extractParams(r){return _E(r)}}function PE(r){const l=new zc;return l.extractWeights(r),l}function iZ(r){return PE(r)}class rZ extends zc{}const zE=.4,GE=[new Je(.738768,.874946),new Je(2.42204,2.65704),new Je(4.30971,7.04493),new Je(10.246,4.59428),new 
Je(12.6868,11.8741)],VE=[new Je(1.603231,2.094468),new Je(6.041143,7.080126),new Je(2.882459,3.518061),new Je(4.266906,5.178857),new Je(9.041765,10.66308)],YE=[117.001,114.697,97.404],HE="tiny_yolov2_model",qE="tiny_yolov2_separable_conv_model";const ig=r=>typeof r=="number";function zI(r){if(!r)throw new Error(`invalid config: ${r}`);if(typeof r.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${r.withSeparableConvs}`);if(!ig(r.iouThreshold)||r.iouThreshold<0||r.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${r.iouThreshold}`);if(!Array.isArray(r.classes)||!r.classes.length||!r.classes.every(l=>typeof l=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(r.classes)}`);if(!Array.isArray(r.anchors)||!r.anchors.length||!r.anchors.map(l=>l||{}).every(l=>ig(l.x)&&ig(l.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(r.anchors)}`);if(r.meanRgb&&(!Array.isArray(r.meanRgb)||r.meanRgb.length!==3||!r.meanRgb.every(ig)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(r.meanRgb)}`)}const qs=Ke(Xe());function Gc(r){return qs.tidy(()=>{const l=qs.mul(r,qs.scalar(.10000000149011612));return qs.add(qs.relu(qs.sub(r,l)),l)})}const js=Ke(Xe());function br(r,l){return js.tidy(()=>{let u=js.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=js.conv2d(u,l.conv.filters,[1,1],"valid"),u=js.sub(u,l.bn.sub),u=js.mul(u,l.bn.truediv),u=js.add(u,l.conv.bias),Gc(u)})}const oo=Ke(Xe());function wr(r,l){return oo.tidy(()=>{let u=oo.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=oo.separableConv2d(u,l.depthwise_filter,l.pointwise_filter,[1,1],"valid"),u=oo.add(u,l.bias),Gc(u)})}const GI=Ke(Xe());function oZ(r,l){const u=_c(r,l);function p(I,S){const x=GI.tensor1d(r(I)),v=GI.tensor1d(r(I));return l.push({paramPath:`${S}/sub`},{paramPath:`${S}/truediv`}),{sub:x,truediv:v}}function g(I,S,x){const v=u(I,S,3,`${x}/conv`),O=p(S,`${x}/bn`);return{conv:v,bn:O}}const f=Wc(r,l);return{extractConvParams:u,extractConvWithBatchNormParams:g,extractSeparableConvParams:f}}function jE(r,l,u,p){const{extractWeights:g,getRemainingWeights:f}=zn(r),I=[],{extractConvParams:S,extractConvWithBatchNormParams:x,extractSeparableConvParams:v}=oZ(g,I);let O;if(l.withSeparableConvs){const[C,U,G,ne,te,oe,ge,fe,Ae]=p,Te=l.isFirstLayerConv2d?S(C,U,3,"conv0"):v(C,U,"conv0"),Ve=v(U,G,"conv1"),rt=v(G,ne,"conv2"),vt=v(ne,te,"conv3"),$t=v(te,oe,"conv4"),Kt=v(oe,ge,"conv5"),Dn=fe?v(ge,fe,"conv6"):void 0,Tn=Ae?v(fe,Ae,"conv7"):void 0,An=S(Ae||fe||ge,5*u,1,"conv8");O={conv0:Te,conv1:Ve,conv2:rt,conv3:vt,conv4:$t,conv5:Kt,conv6:Dn,conv7:Tn,conv8:An}}else{const[C,U,G,ne,te,oe,ge,fe,Ae]=p,Te=x(C,U,"conv0"),Ve=x(U,G,"conv1"),rt=x(G,ne,"conv2"),vt=x(ne,te,"conv3"),$t=x(te,oe,"conv4"),Kt=x(oe,ge,"conv5"),Dn=x(ge,fe,"conv6"),Tn=x(fe,Ae,"conv7"),An=S(Ae,5*u,1,"conv8");O={conv0:Te,conv1:Ve,conv2:rt,conv3:vt,conv4:$t,conv5:Kt,conv6:Dn,conv7:Tn,conv8:An}}if(f().length!==0)throw new Error(`weights remaing after extract: ${f().length}`);return{params:O,paramMappings:I}}function aZ(r,l){const u=ms(r,l);function p(S){const x=u(`${S}/sub`,1),v=u(`${S}/truediv`,1);return{sub:x,truediv:v}}function g(S){const x=u(`${S}/filters`,4),v=u(`${S}/bias`,1);return{filters:x,bias:v}}function f(S){const x=g(`${S}/conv`),v=p(`${S}/bn`);return{conv:x,bn:v}}const 
I=$c(u);return{extractConvParams:g,extractConvWithBatchNormParams:f,extractSeparableConvParams:I}}function KE(r,l){const u=[],{extractConvParams:p,extractConvWithBatchNormParams:g,extractSeparableConvParams:f}=aZ(r,u);let I;if(l.withSeparableConvs){const S=l.filterSizes&&l.filterSizes.length||9;I={conv0:l.isFirstLayerConv2d?p("conv0"):f("conv0"),conv1:f("conv1"),conv2:f("conv2"),conv3:f("conv3"),conv4:f("conv4"),conv5:f("conv5"),conv6:S>7?f("conv6"):void 0,conv7:S>8?f("conv7"):void 0,conv8:p("conv8")}}else I={conv0:g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:g("conv6"),conv7:g("conv7"),conv8:p("conv8")};return Pn(r,u),{params:I,paramMappings:u}}var VI;(function(r){r[r.XS=224]="XS",r[r.SM=320]="SM",r[r.MD=416]="MD",r[r.LG=608]="LG"})(VI||(VI={}));class Lr{constructor({inputSize:r,scoreThreshold:l}={}){this._name="TinyYolov2Options";if(this._inputSize=r||416,this._scoreThreshold=l||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}}const kt=Ke(Xe());class Vc extends En{constructor(r){super("TinyYolov2");zI(r),this._config=r}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(r,l){let u=br(r,l.conv0);return u=kt.maxPool(u,[2,2],[2,2],"same"),u=br(u,l.conv1),u=kt.maxPool(u,[2,2],[2,2],"same"),u=br(u,l.conv2),u=kt.maxPool(u,[2,2],[2,2],"same"),u=br(u,l.conv3),u=kt.maxPool(u,[2,2],[2,2],"same"),u=br(u,l.conv4),u=kt.maxPool(u,[2,2],[2,2],"same"),u=br(u,l.conv5),u=kt.maxPool(u,[2,2],[1,1],"same"),u=br(u,l.conv6),u=br(u,l.conv7),ia(u,l.conv8,"valid",!1)}runMobilenet(r,l){let u=this.config.isFirstLayerConv2d?Gc(ia(r,l.conv0,"valid",!1)):wr(r,l.conv0);return u=kt.maxPool(u,[2,2],[2,2],"same"),u=wr(u,l.conv1),u=kt.maxPool(u,[2,2],[2,2],"same"),u=wr(u,l.conv2),u=kt.maxPool(u,[2,2],[2,2],"same"),u=wr(u,l.conv3),u=kt.maxPool(u,[2,2],[2,2],"same"),u=wr(u,l.conv4),u=kt.maxPool(u,[2,2],[2,2],"same"),u=wr(u,l.conv5),u=kt.maxPool(u,[2,2],[1,1],"same"),u=l.conv6?wr(u,l.conv6):u,u=l.conv7?wr(u,l.conv7):u,ia(u,l.conv8,"valid",!1)}forwardInput(r,l){const{params:u}=this;if(!u)throw new Error("TinyYolov2 - load model before inference");return kt.tidy(()=>{let p=kt.cast(r.toBatchTensor(l,!1),"float32");return p=this.config.meanRgb?di(p,this.config.meanRgb):p,p=p.div(kt.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(p,u):this.runTinyYolov2(p,u)})}async forward(r,l){return await this.forwardInput(await Rt(r),l)}async detect(r,l={}){const{inputSize:u,scoreThreshold:p}=new Lr(l),g=await Rt(r),f=await this.forwardInput(g,u),I=kt.tidy(()=>kt.unstack(f)[0].expandDims()),S={width:g.getInputWidth(0),height:g.getInputHeight(0)},x=await this.extractBoxes(I,g.getReshapedInputDimensions(0),p);f.dispose(),I.dispose();const v=x.map(te=>te.box),O=x.map(te=>te.score),C=x.map(te=>te.classScore),U=x.map(te=>this.config.classes[te.label]),G=gS(v.map(te=>te.rescale(u)),O,this.config.iouThreshold,!0),ne=G.map(te=>new gc(O[te],C[te],U[te],v[te],S));return ne}getDefaultModelName(){return""}extractParamsFromWeigthMap(r){return 
KE(r,this.config)}extractParams(r){const l=this.config.filterSizes||Vc.DEFAULT_FILTER_SIZES,u=l?l.length:void 0;if(u!==7&&u!==8&&u!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${u} filterSizes in config`);return jE(r,this.config,this.boxEncodingSize,l)}async extractBoxes(r,l,u){const{width:p,height:g}=l,f=Math.max(p,g),I=f/p,S=f/g,x=r.shape[1],v=this.config.anchors.length,[O,C,U]=kt.tidy(()=>{const oe=r.reshape([x,x,v,this.boxEncodingSize]),ge=oe.slice([0,0,0,0],[x,x,v,4]),fe=oe.slice([0,0,0,4],[x,x,v,1]),Ae=this.withClassScores?kt.softmax(oe.slice([0,0,0,5],[x,x,v,this.config.classes.length]),3):kt.scalar(0);return[ge,fe,Ae]}),G=[],ne=await C.array(),te=await O.array();for(let oe=0;oeu){const Te=(ge+nu(te[oe][ge][fe][0]))/x*I,Ve=(oe+nu(te[oe][ge][fe][1]))/x*S,rt=Math.exp(te[oe][ge][fe][2])*this.config.anchors[fe].x/x*I,vt=Math.exp(te[oe][ge][fe][3])*this.config.anchors[fe].y/x*S,$t=Te-rt/2,Kt=Ve-vt/2,Dn={row:oe,col:ge,anchor:fe},{classScore:Tn,label:An}=this.withClassScores?await this.extractPredictedClass(U,Dn):{classScore:1,label:0};G.push({box:new tu($t,Kt,$t+rt,Kt+vt),score:Ae,classScore:Ae*Tn,label:An,...Dn})}}return O.dispose(),C.dispose(),U.dispose(),G}async extractPredictedClass(r,l){const{row:u,col:p,anchor:g}=l,f=await r.array();return Array(this.config.classes.length).fill(0).map((I,S)=>f[u][p][g][S]).map((I,S)=>({classScore:I,label:S})).reduce((I,S)=>I.classScore>S.classScore?I:S)}}Vc.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];class Ou extends Vc{constructor(r=!0){const l=Object.assign({},{withSeparableConvs:r,iouThreshold:zE,classes:["face"]},r?{anchors:VE,meanRgb:YE}:{anchors:GE,withClassScores:!0});super(l)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Ht(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?qE:HE}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}function cZ(r,l=!0){const u=new Ou(l);return u.extractWeights(r),u}class YI extends Lr{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}}class wi{async then(r){return r(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}}const HI=Ke(Xe());async function ca(r,l,u,p,g=({alignedRect:f})=>f){const f=r.map(x=>oa(x)?g(x):x.detection),I=p||(l instanceof HI.Tensor?await kc(l,f):await Dc(l,f)),S=await u(I);return I.forEach(x=>x instanceof HI.Tensor&&x.dispose()),S}async function Yc(r,l,u,p,g){return ca([r],l,async f=>u(f[0]),p,g)}const XE=.4,JE=[new Je(1.603231,2.094468),new Je(6.041143,7.080126),new Je(2.882459,3.518061),new Je(4.266906,5.178857),new Je(9.041765,10.66308)],ZE=[117.001,114.697,97.404];class Eu extends Vc{constructor(){const r={withSeparableConvs:!0,iouThreshold:XE,classes:["face"],anchors:JE,meanRgb:ZE,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(r)}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Ht(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}const pt={ssdMobilenetv1:new zc,tinyFaceDetector:new Eu,tinyYolov2:new Ou,faceLandmark68Net:new Au,faceLandmark68TinyNet:new BI,faceRecognitionNet:new Nu,faceExpressionNet:new FI,ageGenderNet:new 
UI},QE=(r,l)=>pt.ssdMobilenetv1.locateFaces(r,l),lZ=(r,l)=>pt.tinyFaceDetector.locateFaces(r,l),hZ=(r,l)=>pt.tinyYolov2.locateFaces(r,l),eD=r=>pt.faceLandmark68Net.detectLandmarks(r),uZ=r=>pt.faceLandmark68TinyNet.detectLandmarks(r),dZ=r=>pt.faceRecognitionNet.computeFaceDescriptor(r),pZ=r=>pt.faceExpressionNet.predictExpressions(r),mZ=r=>pt.ageGenderNet.predictAgeAndGender(r),tD=r=>pt.ssdMobilenetv1.load(r),fZ=r=>pt.tinyFaceDetector.load(r),gZ=r=>pt.tinyYolov2.load(r),yZ=r=>pt.faceLandmark68Net.load(r),bZ=r=>pt.faceLandmark68TinyNet.load(r),wZ=r=>pt.faceRecognitionNet.load(r),LZ=r=>pt.faceExpressionNet.load(r),SZ=r=>pt.ageGenderNet.load(r),IZ=tD,xZ=QE,TZ=eD;class nD extends wi{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class Fu extends nD{async run(){const r=await this.parentTask,l=await ca(r,this.input,async u=>await Promise.all(u.map(p=>pt.faceExpressionNet.predictExpressions(p))),this.extractedFaces);return r.map((u,p)=>Jf(u,l[p]))}withAgeAndGender(){return new Du(this,this.input)}}class _u extends nD{async run(){const r=await this.parentTask;if(!r)return;const l=await Yc(r,this.input,u=>pt.faceExpressionNet.predictExpressions(u),this.extractedFaces);return Jf(r,l)}withAgeAndGender(){return new ku(this,this.input)}}class jc extends Fu{withAgeAndGender(){return new Hc(this,this.input)}withFaceDescriptors(){return new la(this,this.input)}}class Kc extends _u{withAgeAndGender(){return new qc(this,this.input)}withFaceDescriptor(){return new ha(this,this.input)}}class sD extends wi{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class Du extends sD{async run(){const r=await this.parentTask,l=await ca(r,this.input,async u=>await Promise.all(u.map(p=>pt.ageGenderNet.predictAgeAndGender(p))),this.extractedFaces);return r.map((u,p)=>{const{age:g,gender:f,genderProbability:I}=l[p];return ng(sg(u,f,I),g)})}withFaceExpressions(){return new Fu(this,this.input)}}class ku extends sD{async run(){const r=await this.parentTask;if(!r)return;const{age:l,gender:u,genderProbability:p}=await Yc(r,this.input,g=>pt.ageGenderNet.predictAgeAndGender(g),this.extractedFaces);return ng(sg(r,u,p),l)}withFaceExpressions(){return new _u(this,this.input)}}class Hc extends Du{withFaceExpressions(){return new jc(this,this.input)}withFaceDescriptors(){return new la(this,this.input)}}class qc extends ku{withFaceExpressions(){return new Kc(this,this.input)}withFaceDescriptor(){return new ha(this,this.input)}}class qI extends wi{constructor(r,l){super();this.parentTask=r;this.input=l}}class la extends qI{async run(){const r=await this.parentTask,l=await ca(r,this.input,u=>Promise.all(u.map(p=>pt.faceRecognitionNet.computeFaceDescriptor(p))),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return l.map((u,p)=>tg(r[p],u))}withFaceExpressions(){return new jc(this,this.input)}withAgeAndGender(){return new Hc(this,this.input)}}class ha extends qI{async run(){const r=await this.parentTask;if(!r)return;const l=await Yc(r,this.input,u=>pt.faceRecognitionNet.computeFaceDescriptor(u),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return tg(r,l)}withFaceExpressions(){return new Kc(this,this.input)}withAgeAndGender(){return new qc(this,this.input)}}const Wu=Ke(Xe());class jI extends wi{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.useTinyLandmarkNet=u}get landmarkNet(){return this.useTinyLandmarkNet?pt.faceLandmark68TinyNet:pt.faceLandmark68Net}}class KI extends jI{async run(){const r=await 
this.parentTask,l=r.map(g=>g.detection),u=this.input instanceof Wu.Tensor?await kc(this.input,l):await Dc(this.input,l),p=await Promise.all(u.map(g=>this.landmarkNet.detectLandmarks(g)));return u.forEach(g=>g instanceof Wu.Tensor&&g.dispose()),r.map((g,f)=>Mc(g,p[f]))}withFaceExpressions(){return new jc(this,this.input)}withAgeAndGender(){return new Hc(this,this.input)}withFaceDescriptors(){return new la(this,this.input)}}class XI extends jI{async run(){const r=await this.parentTask;if(!r)return;const{detection:l}=r,u=this.input instanceof Wu.Tensor?await kc(this.input,[l]):await Dc(this.input,[l]),p=await this.landmarkNet.detectLandmarks(u[0]);return u.forEach(g=>g instanceof Wu.Tensor&&g.dispose()),Mc(r,p)}withFaceExpressions(){return new Kc(this,this.input)}withAgeAndGender(){return new qc(this,this.input)}withFaceDescriptor(){return new ha(this,this.input)}}class JI extends wi{constructor(r,l=new yi){super();this.input=r;this.options=l}}class rg extends JI{async run(){const{input:r,options:l}=this,u=l instanceof YI?p=>pt.tinyFaceDetector.locateFaces(p,l):l instanceof yi?p=>pt.ssdMobilenetv1.locateFaces(p,l):l instanceof Lr?p=>pt.tinyYolov2.locateFaces(p,l):null;if(!u)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return u(r)}runAndExtendWithFaceDetections(){return new Promise(async r=>{const l=await this.run();return r(l.map(u=>Yo({},u)))})}withFaceLandmarks(r=!1){return new KI(this.runAndExtendWithFaceDetections(),this.input,r)}withFaceExpressions(){return new Fu(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new Du(this.runAndExtendWithFaceDetections(),this.input)}}class ZI extends JI{async run(){const r=await new rg(this.input,this.options);let l=r[0];return r.forEach(u=>{u.score>l.score&&(l=u)}),l}runAndExtendWithFaceDetection(){return new Promise(async r=>{const l=await this.run();return r(l?Yo({},l):void 0)})}withFaceLandmarks(r=!1){return new XI(this.runAndExtendWithFaceDetection(),this.input,r)}withFaceExpressions(){return new _u(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new ku(this.runAndExtendWithFaceDetection(),this.input)}}function AZ(r,l=new yi){return new ZI(r,l)}function og(r,l=new yi){return new rg(r,l)}async function iD(r,l){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await og(r,new yi(l?{minConfidence:l}:{})).withFaceLandmarks().withFaceDescriptors()}async function vZ(r,l={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead"),await og(r,new Lr(l)).withFaceLandmarks().withFaceDescriptors()}const NZ=iD;function QI(r,l){if(r.length!==l.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");const u=Array.from(r),p=Array.from(l);return Math.sqrt(u.map((g,f)=>g-p[f]).reduce((g,f)=>g+Math.pow(f,2),0))}class rD{constructor(r,l=.6){this._distanceThreshold=l;const u=Array.isArray(r)?r:[r];if(!u.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let p=1;const g=()=>`person ${p++}`;this._labeledDescriptors=u.map(f=>{if(f instanceof Vo)return f;if(f instanceof Float32Array)return new Vo(g(),[f]);if(f.descriptor&&f.descriptor instanceof Float32Array)return new Vo(g(),[f.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | 
Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(r,l){return l.map(u=>QI(u,r)).reduce((u,p)=>u+p,0)/(l.length||1)}matchDescriptor(r){return this.labeledDescriptors.map(({descriptors:l,label:u})=>new mm(u,this.computeMeanDistance(r,l))).reduce((l,u)=>l.distancer.toJSON())}}static fromJSON(r){const l=r.labeledDescriptors.map(u=>Vo.fromJSON(u));return new rD(l,r.distanceThreshold)}}function CZ(r){const l=new Eu;return l.extractWeights(r),l}function oD(r,l){const{width:u,height:p}=new us(l.width,l.height);if(u<=0||p<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:u,height:p})}`);if(Array.isArray(r))return r.map(g=>oD(g,{width:u,height:p}));if(oa(r)){const g=r.detection.forSize(u,p),f=r.unshiftedLandmarks.forSize(g.box.width,g.box.height);return Mc(Yo(r,g),f)}return Ui(r)?Yo(r,r.detection.forSize(u,p)):r instanceof Gs||r instanceof Ht?r.forSize(u,p):r}var aD="0.8.4";const RZ=Ke(Xe()),OZ=typeof process!="undefined",EZ=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",DZ={faceapi:aD,node:OZ,browser:EZ};export{UI as AgeGenderNet,tu as BoundingBox,Ct as Box,wi as ComposableTask,la as ComputeAllFaceDescriptorsTask,qI as ComputeFaceDescriptorsTaskBase,ha as ComputeSingleFaceDescriptorTask,KI as DetectAllFaceLandmarksTask,rg as DetectAllFacesTask,jI as DetectFaceLandmarksTaskBase,JI as DetectFacesTaskBase,XI as DetectSingleFaceLandmarksTask,ZI as DetectSingleFaceTask,us as Dimensions,kI as FACE_EXPRESSION_LABELS,Ht as FaceDetection,rZ as FaceDetectionNet,FI as FaceExpressionNet,ra as FaceExpressions,Au as FaceLandmark68Net,BI as FaceLandmark68TinyNet,V9 as FaceLandmarkNet,Gs as FaceLandmarks,YX as FaceLandmarks5,iu as FaceLandmarks68,mm as FaceMatch,rD as FaceMatcher,Nu as FaceRecognitionNet,gr as Gender,fm as LabeledBox,Vo as LabeledFaceDescriptors,to as NetInput,En as NeuralNetwork,gc as ObjectDetection,Je as Point,HX as PredictedBox,su as Rect,zc as SsdMobilenetv1,yi as SsdMobilenetv1Options,Eu as TinyFaceDetector,YI as TinyFaceDetectorOptions,Ou as TinyYolov2,Lr as TinyYolov2Options,VI as TinyYolov2SizeType,NZ as allFaces,iD as allFacesSsdMobilenetv1,vZ as allFacesTinyYolov2,AS as awaitMediaLoaded,vS as bufferToImage,dZ as computeFaceDescriptor,bc as createCanvas,ou as createCanvasFromMedia,iZ as createFaceDetectionNet,q9 as createFaceRecognitionNet,PE as createSsdMobilenetv1,CZ as createTinyFaceDetector,cZ as createTinyYolov2,og as detectAllFaces,eD as detectFaceLandmarks,uZ as detectFaceLandmarksTiny,TZ as detectLandmarks,AZ as detectSingleFace,WI as draw,gt as env,QI as euclideanDistance,ng as extendWithAge,tg as extendWithFaceDescriptor,Yo as extendWithFaceDetection,Jf as extendWithFaceExpressions,Mc as extendWithFaceLandmarks,sg as extendWithGender,kc as extractFaceTensors,Dc as extractFaces,W9 as fetchImage,OI as fetchJson,$9 as fetchNetWeights,sa as fetchOrThrow,Jn as getContext2dOrThrow,qo as getMediaDimensions,NS as imageTensorToCanvas,RI as imageToSquare,PX as inverseSigmoid,mS as iou,wm as isMediaElement,ru as isMediaLoaded,j9 as isWithAge,Ui as isWithFaceDetection,_I as isWithFaceExpressions,oa as isWithFaceLandmarks,K9 as isWithGender,SZ as loadAgeGenderModel,IZ as loadFaceDetectionModel,LZ as loadFaceExpressionModel,yZ as loadFaceLandmarkModel,bZ as loadFaceLandmarkTinyModel,wZ as loadFaceRecognitionModel,tD as loadSsdMobilenetv1Model,fZ as loadTinyFaceDetectorModel,gZ as loadTinyYolov2Model,EI as loadWeightMap,xZ as locateFaces,U9 
as matchDimensions,fS as minBbox,pt as nets,gS as nonMaxSuppression,di as normalize,yS as padToSquare,mZ as predictAgeAndGender,pZ as recognizeFaceExpressions,oD as resizeResults,Ho as resolveInput,MX as shuffleArray,nu as sigmoid,QE as ssdMobilenetv1,RZ as tf,lZ as tinyFaceDetector,hZ as tinyYolov2,Rt as toNetInput,hS as utils,zI as validateConfig,DZ as version}; +`,Q7=$m(Z7),eJ={kernelName:Aa,backendName:"webgl",kernelFunc:Q7};const tJ="return x * x;",nJ=$m(tJ),sJ={kernelName:Nd,backendName:"webgl",kernelFunc:nJ};const jC="return (a - b) * (a - b);",iJ=Sc({opSnippet:jC,packedOpSnippet:jC}),rJ={kernelName:va,backendName:"webgl",kernelFunc:iJ};const KC="return a - b;",oJ=Sc({opSnippet:KC,packedOpSnippet:KC,supportsComplex:!0,cpuKernelImpl:e5}),aJ={kernelName:Na,backendName:"webgl",kernelFunc:oJ};const cJ="return tan(x);",lJ=$m(cJ),hJ={kernelName:Ca,backendName:"webgl",kernelFunc:lJ};const uJ={kernelName:Hl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{perm:i}=t,o=n,a=s.shape.length,c=new Array(a);for(let d=0;d{Pm(xJ,{isNodejs:()=>TJ});function TJ(){return typeof global=="object"&&!0&&typeof e2!="undefined"&&typeof process!="undefined"&&!!process.version}});function fr(r,l,u=!1){if(r.beginPath(),l.slice(1).forEach(({x:p,y},g)=>{const I=l[g];r.moveTo(I.x,I.y),r.lineTo(p,y)}),u){const p=l[l.length-1],y=l[0];if(!p||!y)return;r.moveTo(p.x,p.y),r.lineTo(y.x,y.y)}r.stroke()}class ms{constructor(r,l){if(!ui(r)||!ui(l))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width:r,height:l})}`);this._width=r,this._height=l}get width(){return this._width}get height(){return this._height}reverse(){return new ms(1/this.width,1/this.height)}}const DS={};Pm(DS,{computeReshapedDimensions:()=>_S,getCenterPoint:()=>Xo,isDimensions:()=>Gm,isEven:()=>Vm,isFloat:()=>FS,isTensor:()=>jo,isTensor1D:()=>AJ,isTensor2D:()=>kS,isTensor3D:()=>gr,isTensor4D:()=>Rs,isValidNumber:()=>ui,isValidProbablitiy:()=>vc,range:()=>_i,round:()=>Ko});const n2=Je(Ze());function jo(r,l){return r instanceof n2.Tensor&&r.shape.length===l}function AJ(r){return jo(r,1)}function kS(r){return jo(r,2)}function gr(r){return jo(r,3)}function Rs(r){return jo(r,4)}function FS(r){return r%1!==0}function Vm(r){return r%2===0}function Ko(r,l=2){const u=Math.pow(10,l);return Math.floor(r*u)/u}function Gm(r){return r&&r.width&&r.height}function _S({width:r,height:l},u){const p=u/Math.max(l,r);return new ms(Math.round(r*p),Math.round(l*p))}function Xo(r){return r.reduce((l,u)=>l.add(u),new Qe(0,0)).div(new Qe(r.length,r.length))}function _i(r,l,u){return Array(r).fill(0).map((p,y)=>l+y*u)}function ui(r){return!!r&&r!==Infinity&&r!==-Infinity&&!isNaN(r)||r===0}function vc(r){return ui(r)&&0<=r&&r<=1}class Qe{constructor(r,l){this._x=r,this._y=l}get x(){return this._x}get y(){return this._y}add(r){return new Qe(this.x+r.x,this.y+r.y)}sub(r){return new Qe(this.x-r.x,this.y-r.y)}mul(r){return new Qe(this.x*r.x,this.y*r.y)}div(r){return new Qe(this.x/r.x,this.y/r.y)}abs(){return new Qe(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Qe(Math.floor(this.x),Math.floor(this.y))}}class _t{static isRect(r){return!!r&&[r.x,r.y,r.width,r.height].every(ui)}static assertIsValidBox(r,l,u=!1){if(!_t.isRect(r))throw new Error(`${l} - invalid box: ${JSON.stringify(r)}, expected object with properties x, y, width, height`);if(!u&&(r.width<0||r.height<0))throw new Error(`${l} - width (${r.width}) and 
height (${r.height}) must be positive numbers`)}constructor(r,l=!0){const u=r||{},p=[u.left,u.top,u.right,u.bottom].every(ui),y=[u.x,u.y,u.width,u.height].every(ui);if(!y&&!p)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(u)}`);const[g,I,S,T]=y?[u.x,u.y,u.width,u.height]:[u.left,u.top,u.right-u.left,u.bottom-u.top];_t.assertIsValidBox({x:g,y:I,width:S,height:T},"Box.constructor",l),this._x=g,this._y=I,this._width=S,this._height=T}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Qe(this.left,this.top)}get topRight(){return new Qe(this.right,this.top)}get bottomLeft(){return new Qe(this.left,this.bottom)}get bottomRight(){return new Qe(this.right,this.bottom)}round(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(y=>Math.round(y));return new _t({x:r,y:l,width:u,height:p})}floor(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(y=>Math.floor(y));return new _t({x:r,y:l,width:u,height:p})}toSquare(){let{x:r,y:l,width:u,height:p}=this;const y=Math.abs(u-p);return ul&&(I=-D+l+u,D=l),_>r&&(S=-_+r+p,_=r),T<1&&(S=2-T,T=1),C<1&&(S=2-C,C=1),{dy:g,edy:S,dx:y,edx:I,y:C,ey:_,x:T,ex:D,w:u,h:p}}calibrate(r){return new _t({left:this.left+r.left*this.width,top:this.top+r.top*this.height,right:this.right+r.right*this.width,bottom:this.bottom+r.bottom*this.height}).toSquare().round()}}class gu extends _t{constructor(r,l,u,p,y=!1){super({left:r,top:l,right:u,bottom:p},y)}}class Nc{constructor(r,l,u,p,y){this._imageDims=new ms(y.width,y.height),this._score=r,this._classScore=l,this._className=u,this._box=new _t(p).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new _t(this._box).rescale(this.imageDims.reverse())}forSize(r,l){return new Nc(this.score,this.classScore,this.className,this.relativeBox,{width:r,height:l})}}class Jt extends Nc{constructor(r,l,u){super(r,r,"",l,u)}forSize(r,l){const{score:u,relativeBox:p,imageDims:y}=super.forSize(r,l);return new Jt(u,p,y)}}function WS(r,l,u=!0){const p=Math.max(0,Math.min(r.right,l.right)-Math.max(r.left,l.left)),y=Math.max(0,Math.min(r.bottom,l.bottom)-Math.max(r.top,l.top)),g=p*y;return u?g/(r.area+l.area-g):g/Math.min(r.area,l.area)}function $S(r){const l=r.map(S=>S.x),u=r.map(S=>S.y),p=l.reduce((S,T)=>TTSS({score:I,boxIndex:S})).sort((I,S)=>I.score-S.score).map(I=>I.boxIndex);const g=[];for(;y.length>0;){const I=y.pop();g.push(I);const S=y,T=[];for(let C=0;CT[D]<=u)}return g}const Wi=Je(Ze());function di(r,l){return Wi.tidy(()=>{const[u,p,y]=l,g=Wi.fill([...r.shape.slice(0,3),1],u,"float32"),I=Wi.fill([...r.shape.slice(0,3),1],p,"float32"),S=Wi.fill([...r.shape.slice(0,3),1],y,"float32"),T=Wi.concat([g,I,S],3);return Wi.sub(r,T)})}const so=Je(Ze());function BS(r,l=!1){return so.tidy(()=>{const[u,p]=r.shape.slice(1);if(u===p)return r;const y=Math.abs(u-p),g=Math.round(y*(l?.5:1)),I=u>p?2:1,S=A=>{const B=r.shape.slice();return B[I]=A,so.fill(B,0,"float32")},T=S(g),C=y-T.shape[I],D=l&&C?S(C):null,_=[D,r,T].filter(A=>!!A).map(A=>so.cast(A,"float32"));return 
so.concat(_,I)})}function vJ(r){const l=r.slice();for(let u=l.length-1;u>0;u--){const p=Math.floor(Math.random()*(u+1)),y=l[u];l[u]=l[p],l[p]=y}return l}function yu(r){return 1/(1+Math.exp(-r))}function NJ(r){return Math.log(r/(1-r))}class bu extends _t{constructor(r,l,u,p,y=!1){super({x:r,y:l,width:u,height:p},y)}}const CJ=.5,RJ=.43,OJ=.45;class Gs{constructor(r,l,u=new Qe(0,0)){const{width:p,height:y}=l;this._imgDims=new ms(p,y),this._shift=u,this._positions=r.map(g=>g.mul(new Qe(p,y)).add(u))}get shift(){return new Qe(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(r=>r.sub(this._shift).div(new Qe(this.imageWidth,this.imageHeight)))}forSize(r,l){return new this.constructor(this.relativePositions,{width:r,height:l})}shiftBy(r,l){return new this.constructor(this.relativePositions,this._imgDims,new Qe(r,l))}shiftByPoint(r){return this.shiftBy(r.x,r.y)}align(r,l={}){if(r){const y=r instanceof Jt?r.box.floor():new _t(r);return this.shiftBy(y.x,y.y).align(null,l)}const{useDlibAlignment:u,minBoxPadding:p}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},l);return u?this.alignDlib():this.alignMinBbox(p)}alignDlib(){const r=this.getRefPointsForAlignment(),[l,u,p]=r,y=D=>p.sub(D).magnitude(),g=(y(l)+y(u))/2,I=Math.floor(g/OJ),S=Xo(r),T=Math.floor(Math.max(0,S.x-CJ*I)),C=Math.floor(Math.max(0,S.y-RJ*I));return new bu(T,C,Math.min(I,this.imageWidth+T),Math.min(I,this.imageHeight+C))}alignMinBbox(r){const l=$S(this.positions);return l.pad(l.width*r,l.height*r)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}}class EJ extends Gs{getRefPointsForAlignment(){const r=this.positions;return[r[0],r[1],Xo([r[3],r[4]])]}}class wu extends Gs{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(Xo)}}class Ym{constructor(r,l){this._label=r,this._distance=l}get label(){return this._label}get distance(){return this._distance}toString(r=!0){return`${this.label}${r?` (${Ko(this.distance)})`:""}`}}class Hm extends _t{static assertIsValidLabeledBox(r,l){if(_t.assertIsValidBox(r,l),!ui(r.label))throw new Error(`${l} - expected property label (${r.label}) to be a number`)}constructor(r,l){super(r);this._label=l}get label(){return this._label}}class Jo{constructor(r,l){if(!(typeof r=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(l)||l.some(u=>!(u instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=r,this._descriptors=l}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(r=>Array.from(r))}}static fromJSON(r){const l=r.descriptors.map(u=>new Float32Array(u));return new Jo(r.label,l)}}class DJ extends Hm{static assertIsValidPredictedBox(r,l){if(Hm.assertIsValidLabeledBox(r,l),!vc(r.score)||!vc(r.classScore))throw new Error(`${l} - expected properties score (${r.score}) and 
(${r.classScore}) to be a number between [0, 1]`)}constructor(r,l,u,p){super(r,l);this._score=u,this._classScore=p}get score(){return this._score}get classScore(){return this._classScore}}function $i(r){return r.detection instanceof Jt}function Zo(r,l){const u={detection:l};return Object.assign({},r,u)}function MS(){const r=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},l=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:r,readFile:l}}function qm(r){let l="";if(!r)try{r=require("fs")}catch(p){l=p.toString()}const u=r?function(p){return new Promise((y,g)=>{r.readFile(p,function(I,S){return I?g(I):y(S)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${l}`)};return{readFile:u}}function PS(){const r=global.Canvas||global.HTMLCanvasElement,l=global.Image||global.HTMLImageElement,u=function(){if(r)return new r;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},p=function(){if(l)return new l;throw new Error("createImageElement - missing Image implementation for nodejs environment")},y=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},g=qm();return{Canvas:r||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:l||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:u,createImageElement:p,fetch:y,...g}}function zS(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}const VS=Je(t2());let In;function kJ(){if(!In)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return In}function GS(r){In=r}function YS(){if(zS())return GS(MS());if(VS.isNodejs())return GS(PS())}function FJ(r){if(In||YS(),!In)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");const{Canvas:l=In.Canvas,Image:u=In.Image}=r;In.Canvas=l,In.Image=u,In.createCanvasElement=r.createCanvasElement||(()=>new l),In.createImageElement=r.createImageElement||(()=>new u),In.ImageData=r.ImageData||In.ImageData,In.Video=r.Video||In.Video,In.fetch=r.fetch||In.fetch,In.readFile=r.readFile||In.readFile}const St={getEnv:kJ,setEnv:GS,initialize:YS,createBrowserEnv:MS,createFileSystem:qm,createNodejsEnv:PS,monkeyPatch:FJ,isBrowser:zS,isNodejs:VS.isNodejs};YS();function Qo(r){return!St.isNodejs()&&typeof r=="string"?document.getElementById(r):r}function is(r){const{Canvas:l,CanvasRenderingContext2D:u}=St.getEnv();if(r instanceof u)return r;const p=Qo(r);if(!(p instanceof l))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");const y=p.getContext("2d");if(!y)throw new Error("resolveContext2d - canvas 2d context is null");return y}var Ui;(function(r){r.TOP_LEFT="TOP_LEFT",r.TOP_RIGHT="TOP_RIGHT",r.BOTTOM_LEFT="BOTTOM_LEFT",r.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Ui||(Ui={}));class 
jm{constructor(r={}){const{anchorPosition:l,backgroundColor:u,fontColor:p,fontSize:y,fontStyle:g,padding:I}=r;this.anchorPosition=l||Ui.TOP_LEFT,this.backgroundColor=u||"rgba(0, 0, 0, 0.5)",this.fontColor=p||"rgba(255, 255, 255, 1)",this.fontSize=y||14,this.fontStyle=g||"Georgia",this.padding=I||4}}class Cc{constructor(r,l,u={}){this.text=typeof r=="string"?[r]:r instanceof Cc?r.text:r,this.anchor=l,this.options=new jm(u)}measureWidth(r){const{padding:l}=this.options;return this.text.map(u=>r.measureText(u).width).reduce((u,p)=>u{const B=S+D.x,ne=S+D.y+(A+1)*g;u.fillText(_,B,ne)})}}class s2{constructor(r={}){const{boxColor:l,lineWidth:u,label:p,drawLabelOptions:y}=r;this.boxColor=l||"rgba(0, 0, 255, 1)",this.lineWidth=u||2,this.label=p;const g={anchorPosition:Ui.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new jm(Object.assign({},g,y))}}class HS{constructor(r,l={}){this.box=new _t(r),this.options=new s2(l)}draw(r){const l=is(r),{boxColor:u,lineWidth:p}=this.options,{x:y,y:g,width:I,height:S}=this.box;l.strokeStyle=u,l.lineWidth=p,l.strokeRect(y,g,I,S);const{label:T}=this.options;T&&new Cc([T],{x:y-p/2,y:g},this.options.drawLabelOptions).draw(r)}}function _J(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const y=p instanceof Jt?p.score:$i(p)?p.detection.score:void 0,g=p instanceof Jt?p.box:$i(p)?p.detection.box:new _t(p),I=y?`${Ko(y)}`:void 0;new HS(g,{label:I}).draw(r)})}function Lu(r){const{Image:l,Video:u}=St.getEnv();return r instanceof l&&r.complete||r instanceof u&&r.readyState>=3}function qS(r){return new Promise((l,u)=>{if(r instanceof St.getEnv().Canvas||Lu(r))return l(null);function p(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",p),g.currentTarget.removeEventListener("error",y),l(g)}function y(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",p),g.currentTarget.removeEventListener("error",y),u(g)}r.addEventListener("load",p),r.addEventListener("error",y)})}function jS(r){return new Promise((l,u)=>{if(!(r instanceof Blob))return u("bufferToImage - expected buf to be of type: Blob");const p=new FileReader;p.onload=()=>{if(typeof p.result!="string")return u("bufferToImage - expected reader.result to be a string, in onload");const y=St.getEnv().createImageElement();y.onload=()=>l(y),y.onerror=u,y.src=p.result},p.onerror=u,p.readAsDataURL(r)})}function ea(r){const{Image:l,Video:u}=St.getEnv();return r instanceof l?new ms(r.naturalWidth,r.naturalHeight):r instanceof u?new ms(r.videoWidth,r.videoHeight):new ms(r.width,r.height)}function Rc({width:r,height:l}){const{createCanvasElement:u}=St.getEnv(),p=u();return p.width=r,p.height=l,p}function Su(r,l){const{ImageData:u}=St.getEnv();if(!(r instanceof u)&&!Lu(r))throw new Error("createCanvasFromMedia - media has not finished loading yet");const{width:p,height:y}=l||ea(r),g=Rc({width:p,height:y});return r instanceof u?is(g).putImageData(r,0,0):is(g).drawImage(r,0,0,p,y),g}const Km=Je(Ze());async function KS(r,l){const u=l||St.getEnv().createCanvasElement(),[p,y,g]=r.shape.slice(Rs(r)?1:0),I=Km.tidy(()=>r.as3D(p,y,g).toInt());return await Km.browser.toPixels(I,u),I.dispose(),u}function Xm(r){const{Image:l,Canvas:u,Video:p}=St.getEnv();return r instanceof l||r instanceof u||r instanceof p}const WJ=1e-7,$J=1e-4;class i2{time(r){return se("time")}read(r){return se("read")}readSync(r){return se("readSync")}numDataIds(){return se("numDataIds")}disposeData(r){return se("disposeData")}write(r,l,u){return se("write")}move(r,l,u,p){return se("move")}memory(){return 
se("memory")}floatPrecision(){return se("floatPrecision")}epsilon(){return this.floatPrecision()===32?WJ:$J}batchMatMul(r,l,u,p){return se("batchMatMul")}fusedBatchMatMul({a:r,b:l,transposeA:u,transposeB:p,bias:y,activation:g,preluActivationWeights:I}){return se("fusedBatchMatMul")}slice(r,l,u){return se("slice")}stridedSlice(r,l,u,p){return se("stridedSlice")}unstack(r,l){return se("unstack")}reverse(r,l){return se("reverse")}concat(r,l){return se("concat")}neg(r){return se("neg")}add(r,l){return se("add")}addN(r){return se("addN")}subtract(r,l){return se("subtract")}multiply(r,l){return se("multiply")}realDivide(r,l){return se("realDivide")}floorDiv(r,l){return se("floorDiv")}sum(r,l){return se("sum")}prod(r,l){return se("prod")}unsortedSegmentSum(r,l,u){return se("unsortedSegmentSum")}argMin(r,l){return se("argMin")}argMax(r,l){return se("argMax")}equal(r,l){return se("equal")}notEqual(r,l){return se("notEqual")}less(r,l){return se("less")}lessEqual(r,l){return se("lessEqual")}greater(r,l){return se("greater")}greaterEqual(r,l){return se("greaterEqual")}logicalNot(r){return se("logicalNot")}logicalAnd(r,l){return se("logicalAnd")}logicalOr(r,l){return se("logicalOr")}where(r){return se("where")}select(r,l,u){return se("select")}topk(r,l,u){return se("topk")}min(r,l){return se("min")}minimum(r,l){return se("minimum")}mod(r,l){return se("mod")}max(r,l){return se("max")}maximum(r,l){return se("maximum")}all(r,l){return se("all")}any(r,l){return se("any")}squaredDifference(r,l){return se("squaredDifference")}ceil(r){return se("ceil")}floor(r){return se("floor")}round(r){return se("round")}sign(r){return se("sign")}isNaN(r){return se("isNaN")}isInf(r){return se("isInf")}isFinite(r){return se("isFinite")}pow(r,l){return se("pow")}exp(r){return se("exp")}expm1(r){return se("expm1")}softmax(r,l){return se("softmax")}log(r){return se("log")}log1p(r){return se("log1p")}sqrt(r){return se("sqrt")}rsqrt(r){return se("rsqrt")}square(r){return se("square")}reciprocal(r){return se("reciprocal")}relu(r){return se("relu")}relu6(r){return se("relu6")}prelu(r,l){return se("prelu")}elu(r){return se("elu")}eluDer(r,l){return se("eluDer")}selu(r){return se("selu")}int(r){return se("int")}clip(r,l,u){return se("clip")}abs(r){return se("abs")}complexAbs(r){return se("complexAbs")}sigmoid(r){return se("sigmoid")}softplus(r){return se("softplus")}sin(r){return se("sin")}cos(r){return se("cos")}tan(r){return se("tan")}asin(r){return se("asin")}acos(r){return se("acos")}atan(r){return se("atan")}atan2(r,l){return se("atan2")}sinh(r){return se("sinh")}cosh(r){return se("cosh")}tanh(r){return se("tanh")}asinh(r){return se("asinh")}acosh(r){return se("acosh")}atanh(r){return se("atanh")}erf(r){return se("erf")}step(r,l){return se("step")}fusedConv2d({input:r,filter:l,convInfo:u,bias:p,activation:y,preluActivationWeights:g}){return se("fusedConv2d")}conv2d(r,l,u){return se("conv2d")}conv2dDerInput(r,l,u){return se("conv2dDerInput")}conv2dDerFilter(r,l,u){return se("conv2dDerFilter")}fusedDepthwiseConv2D({input:r,filter:l,convInfo:u,bias:p,activation:y,preluActivationWeights:g}){return se("fusedDepthwiseConv2D")}depthwiseConv2D(r,l,u){return se("depthwiseConv2D")}depthwiseConv2DDerInput(r,l,u){return se("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(r,l,u){return se("depthwiseConv2DDerFilter")}conv3d(r,l,u){return se("conv3d")}conv3dDerInput(r,l,u){return se("conv3dDerInput")}conv3dDerFilter(r,l,u){return se("conv3dDerFilter")}maxPool(r,l){return se("maxPool")}maxPoolBackprop(r,l,u,p){return 
se("maxPoolBackprop")}avgPool(r,l){return se("avgPool")}avgPoolBackprop(r,l,u){return se("avgPoolBackprop")}avgPool3d(r,l){return se("avgPool3d")}avgPool3dBackprop(r,l,u){return se("avgPool3dBackprop")}maxPool3d(r,l){return se("maxPool3d")}maxPool3dBackprop(r,l,u,p){return se("maxPool3dBackprop")}reshape(r,l){return se("reshape")}cast(r,l){return se("cast")}tile(r,l){return se("tile")}pad(r,l,u){return se("pad")}transpose(r,l){return se("transpose")}gather(r,l,u){return se("gather")}gatherND(r,l){return se("gatherND")}scatterND(r,l,u){return se("scatterND")}batchToSpaceND(r,l,u){return se("batchToSpaceND")}spaceToBatchND(r,l,u){return se("spaceToBatchND")}resizeBilinear(r,l,u,p){return se("resizeBilinear")}resizeBilinearBackprop(r,l,u){return se("resizeBilinearBackprop")}resizeNearestNeighbor(r,l,u,p){return se("resizeNearestNeighbor")}resizeNearestNeighborBackprop(r,l,u){return se("resizeNearestNeighborBackprop")}batchNorm(r,l,u,p,y,g){return se("batchNorm")}localResponseNormalization4D(r,l,u,p,y){return se("localResponseNormalization4D")}LRNGrad(r,l,u,p,y,g,I){return se("LRNGrad")}multinomial(r,l,u,p){return se("multinomial")}oneHot(r,l,u,p){return se("oneHot")}cumsum(r,l,u,p){return se("cumsum")}nonMaxSuppression(r,l,u,p,y){return se("nonMaxSuppression")}fft(r){return se("fft")}ifft(r){return se("ifft")}complex(r,l){return se("complex")}real(r){return se("real")}imag(r){return se("imag")}cropAndResize(r,l,u,p,y,g){return se("cropAndResize")}depthToSpace(r,l,u){return se("depthToSpace")}split(r,l,u){return se("split")}sparseToDense(r,l,u,p){return se("sparseToDense")}diag(r){return se("diag")}fill(r,l,u){return se("fill")}onesLike(r){return se("onesLike")}zerosLike(r){return se("zerosLike")}linspace(r,l,u){return se("linspace")}dispose(){return se("dispose")}}function se(r){throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function J(r,l){if(!r)throw new Error(typeof l=="string"?l:l())}function tn(r,l,u=""){J(Iu(r,l),()=>u+` Shapes ${r} and ${l} must match`)}function Oc(r){J(r!=null,()=>"The input to the tensor constructor must be a non-null value.")}function ta(r,l=[],u=!1){if(l==null&&(l=[]),Array.isArray(r)||Os(r)&&!u)for(let p=0;p=0)u*=r[g];else if(r[g]===-1){if(p!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${p} and dim ${g}`);p=g}else if(r[g]<0)throw Error(`Shapes can not be < 0. Found ${r[g]} at dim ${g}`);if(p===-1){if(l>0&&l!==u)throw Error(`Size(${l}) must match the product of shape ${r}`);return r}if(u===0)throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`);if(l%u!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${l} / ${u}`);const y=r.slice();return y[p]=l/u,y}function ft(r,l){const u=l.length;return r=r==null?l.map((p,y)=>y):[].concat(r),J(r.every(p=>p>=-u&&p`All values in axis param must be in range [-${u}, ${u}) but got axis ${r}`),J(r.every(p=>nn(p)),()=>`All values in axis param must be integers but got axis ${r}`),r.map(p=>p<0?u+p:p)}function o2(r,l){const u=[],p=[],y=l!=null&&Array.isArray(l)&&l.length===0,g=l==null||y?null:ft(l,r).sort();let I=0;for(let S=0;SS)&&r[S]===1&&(u.push(r[S]),p.push(S)),g[I]<=S&&I++}r[S]!==1&&(u.push(r[S]),p.push(S))}return{newShape:u,keptDims:p}}function a2(r,l){let u=null;if(r==null||r==="float32")u=new Float32Array(l);else if(r==="int32")u=new Int32Array(l);else if(r==="bool")u=new Uint8Array(l);else if(r==="string")u=new Array(l);else throw new Error(`Unknown data type ${r}`);return u}function c2(r,l){for(let u=0;ul+=u.length),l}function xu(r){return typeof r=="string"||r instanceof String}function UJ(r){return typeof r=="boolean"}function BJ(r){return typeof r=="number"}function Tu(r){return Array.isArray(r)?Tu(r[0]):r instanceof Float32Array?"float32":r instanceof Int32Array||r instanceof Uint8Array?"int32":BJ(r)?"float32":xu(r)?"string":UJ(r)?"bool":"float32"}function XS(r){return!!(r&&r.constructor&&r.call&&r.apply)}function Au(r){const l=r.length;if(l<2)return[];const u=new Array(l-1);u[l-2]=r[l-1];for(let p=l-3;p>=0;--p)u[p]=u[p+1]*r[p+1];return u}function d2(r,l,u){const p=new Array;if(l.length===1){const y=l[0];for(let g=0;gS*T);for(let S=0;Sp*y);if(u===0)return[];if(u!==l.length)throw new Error(`[${r}] does not match the input size ${l.length}.`);return d2(0,r,l)}function Jm(r,l){const u=na(r,l);for(let p=0;p{J(Number.isInteger(l)&&l>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${r}].`)})}function Qm(r){return r&&r.then&&typeof r.then=="function"}const p2="tfjsflags";class m2{constructor(r){this.global=r,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(r,l){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${l}.`),this.platformName=r,this.platform=l}registerFlag(r,l,u){if(this.flagRegistry[r]={evaluationFn:l,setHook:u},this.urlFlags[r]!=null){const p=this.urlFlags[r];console.warn(`Setting feature override from URL ${r}: ${p}.`),this.set(r,p)}}async getAsync(r){return r in this.flags?this.flags[r]:(this.flags[r]=await this.evaluateFlag(r),this.flags[r])}get(r){if(r in this.flags)return this.flags[r];const l=this.evaluateFlag(r);if(Qm(l))throw new Error(`Flag ${r} cannot be synchronously evaluated. 
Please use getAsync() instead.`);return this.flags[r]=l,this.flags[r]}getNumber(r){return this.get(r)}getBool(r){return this.get(r)}getFlags(){return this.flags}get features(){return this.flags}set(r,l){if(this.flagRegistry[r]==null)throw new Error(`Cannot set flag ${r} as it has not been registered.`);this.flags[r]=l,this.flagRegistry[r].setHook!=null&&this.flagRegistry[r].setHook(l)}evaluateFlag(r){if(this.flagRegistry[r]==null)throw new Error(`Cannot evaluate flag '${r}': no evaluation function found.`);return this.flagRegistry[r].evaluationFn()}setFlags(r){this.flags=Object.assign({},r)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const r=MJ(this.global.location.search);if(p2 in r){const l=r[p2].split(",");l.forEach(u=>{const[p,y]=u.split(":");this.urlFlags[p]=PJ(p,y)})}}}function MJ(r){const l={};return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(u,...p)=>(zJ(l,p[0],p[1]),p.join("="))),l}function zJ(r,l,u){r[decodeURIComponent(l)]=decodeURIComponent(u||"")}function PJ(r,l){if(l=l.toLowerCase(),l==="true"||l==="false")return l==="true";if(`${+l}`===l)return+l;throw new Error(`Could not parse value flag value ${l} for flag ${r}.`)}function Es(){return f2}let f2=null;function g2(r){f2=r}let ZS;function QS(){if(ZS==null){let r;if(typeof window!="undefined")r=window;else if(typeof global!="undefined")r=global;else if(typeof process!="undefined")r=process;else if(typeof self!="undefined")r=self;else throw new Error("Could not find a global object");ZS=r}return ZS}function VJ(){const r=QS();return r._tfGlobals==null&&(r._tfGlobals=new Map),r._tfGlobals}function eI(r,l){const u=VJ();if(u.has(r))return u.get(r);{const p=l();return u.set(r,p),u.get(r)}}const 
ef="Abs",y2="Acos",b2="Acosh",Dc="Add",w2="AddN",L2="ArgMax",S2="ArgMin",I2="Asin",x2="Asinh",T2="Atan",A2="Atanh",v2="Atan2",N2="AvgPool",C2="AvgPoolBackprop",R2="AvgPool3D",O2="AvgPool3DBackprop",tf="BatchMatMul",nf="BatchToSpaceND",sf="BroadcastTo",kc="Cast",E2="Ceil",D2="ClipByValue",k2="Complex",rf="Concat",of="Conv2D",F2="Conv2DBackpropFilter",af="Conv2DBackpropInput",_2="Conv3D",W2="Conv3DBackpropFilterV2",$2="Conv3DBackpropInputV2",cf="Cos",lf="Cosh",hf="Cumsum",U2="CropAndResize",B2="DepthwiseConv2dNative",M2="DepthwiseConv2dNativeBackpropFilter",P2="DepthwiseConv2dNativeBackpropInput",z2="Dilation2D",V2="Dilation2DBackpropInput",G2="Dilation2DBackpropFilter",uf="Div",Y2="Elu",H2="EluGrad",q2="Erf",j2="Equal",df="Exp",K2="Expm1",X2="FFT",J2="Fill",Z2="FlipLeftRight",pf="Floor",mf="FloorDiv",Q2="FusedBatchNorm",ff="GatherV2",eR="Greater",gf="GreaterEqual",yf="Identity",tR="IFFT",nR="Imag",sR="IsFinite",iR="IsInf",rR="IsNan",oR="Less",aR="LessEqual",bf="Log",wf="Log1p",cR="LogicalAnd",lR="LogicalNot",hR="LogSoftmax",uR="LRN",dR="LRNBackprop",Lf="Max",Sf="Maximum",pR="MaxPool",mR="MaxPoolBackprop",fR="MaxPool3D",gR="MaxPool3DBackprop",yR="Mean",If="Min",xf="Minimum",bR="MirrorPad",wR="Mod",Tf="Multiply",Af="Negate",LR="NotEqual",SR="NonMaxSuppressionV3",IR="NonMaxSuppressionV4",xR="NonMaxSuppressionV5",TR="OnesLike",AR="OneHot",vf="PadV2",Nf="Pow",vR="Prelu",NR="Range",CR="Real",RR="Reciprocal",Cf="Relu",Rf="Reshape",Of="ResizeNearestNeighbor",OR="ResizeNearestNeighborGrad",Ef="ResizeBilinear",ER="ResizeBilinearGrad",DR="Relu6",Df="Reverse",kR="Round",kf="Rsqrt",Ff="SelectV2",FR="Selu",_f="Slice",Wf="Sin",$f="Sinh",_R="Sign",Uf="Sigmoid",WR="Softplus",Bf="Sqrt",Mf="Sum",Pf="SpaceToBatchND",zf="SplitV",$R="Softmax",Vf="SquaredDifference",UR="Square",Gf="Sub",BR="Tan",MR="Tanh",Yf="Tile",Hf="Transpose",qf="Unpack",jf="UnsortedSegmentSum",Kf="ZerosLike",Xf="Step",tI="FromPixels",PR="RotateWithOffset";const zR=eI("kernelRegistry",()=>new Map),nI=eI("gradRegistry",()=>new Map);function Jf(r,l){const u=GJ(r,l);return zR.get(u)}function sI(r){return nI.get(r)}function iI(r){const l=zR.entries(),u=[];for(;;){const{done:p,value:y}=l.next();if(p)break;const[g,I]=y,[S]=g.split("_");S===r&&u.push(I)}return u}function VR(r){const{kernelName:l}=r;nI.has(l)&&(Es().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${l}'`)),nI.set(l,r)}function GJ(r,l){return`${l}_${r}`}function YJ(r,l){return r instanceof Float32Array&&l==="float32"||r instanceof Int32Array&&l==="int32"||r instanceof Uint8Array&&l==="bool"}function Zf(r,l){if(l==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(r)&&(r=ta(r)),Es().getBool("DEBUG")&&c2(r,l),YJ(r,l))return r;if(l==null||l==="float32"||l==="complex64")return new Float32Array(r);if(l==="int32")return new Int32Array(r);if(l==="bool"){const u=new Uint8Array(r.length);for(let p=0;p{p=u()},g=this.backendTimer.time(y);for(let S=0;S{HJ(C,T.dtype,r)})}const I={kernelName:r,outputs:p,inputs:l,timeMs:g.then(S=>S.kernelMs),extraInfo:g.then(S=>S.getExtraProfileInfo!=null?S.getExtraProfileInfo():"")};return I}logKernelProfile(r){const{kernelName:l,outputs:u,timeMs:p,inputs:y,extraInfo:g}=r;u.forEach(I=>{Promise.all([I.data(),p,g]).then(S=>{this.logger.logKernelProfile(l,I,S[0],S[1],y,S[2])})})}}function HJ(r,l,u){if(l!=="float32")return!1;for(let p=0;p0?ne:""} `}}console.log(`%c${S} %c${I} %c${T}D ${D} %c${C} %c${_} %c${g}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function 
HR(r,l,u){const p={},y={};for(let T=0;Tp[te.id]=!0),B=!0,y[C.id]=!0;break}if(B)break}}const g={};g[u.id]=!0;const I={};for(let T=r.length-1;T>=0;T--){const C=r[T],D=C.inputs;for(let _=0;_=0;y--){const g=l[y],I=[];if(g.outputs.forEach(T=>{const C=r[T.id];C!=null?I.push(C):I.push(null)}),g.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${g.kernelName}.`);const S=g.gradient(I);for(const T in g.inputs){if(!(T in S))throw new Error(`Cannot backprop through input ${T}. Available gradients found: ${Object.keys(S)}.`);const C=u(()=>S[T]());if(C.dtype!=="float32")throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input ${T} must have 'float32' dtype, but has '${C.dtype}'`);const D=g.inputs[T];if(!Iu(C.shape,D.shape))throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input '${T}' has shape '${C.shape}', which does not match the shape of the input '${D.shape}'`);if(r[D.id]==null)r[D.id]=C;else{const _=r[D.id];r[D.id]=p(_,C),_.dispose()}}}}const jR=20,vu=3,aI=7;function KR(r,l,u,p){const y=Au(l),g=jJ(r,l,u,y),I=l.length,S=Qf(r,l,u,y,g),T=["Tensor"];return p&&(T.push(` dtype: ${u}`),T.push(` rank: ${I}`),T.push(` shape: [${l}]`),T.push(" values:")),T.push(S.map(C=>" "+C).join(` +`)),T.join(` +`)}function jJ(r,l,u,p){const y=Zt(l),g=p[p.length-1],I=new Array(g).fill(0),S=l.length,T=u==="complex64"?Cu(r):r;if(S>1)for(let C=0;CjR){const P=vu*I;let ge=Array.from(r.slice(0,P)),ae=Array.from(r.slice((S-vu)*I,S*I));return u==="complex64"&&(ge=Cu(ge),ae=Cu(ae)),["["+ge.map((Le,ve)=>Nu(Le,y[ve],u)).join(", ")+", ..., "+ae.map((Le,ve)=>Nu(Le,y[S-vu+ve],u)).join(", ")+"]"]}const te=u==="complex64"?Cu(r):Array.from(r);return["["+te.map((P,ge)=>Nu(P,y[ge],u)).join(", ")+"]"]}const C=l.slice(1),D=p.slice(1),_=p[0]*I,A=[];if(S>jR){for(let te=0;te`Length of values '${p}' does not match the size inferred by the shape '${this.size}'.`)}if(l==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=u||a2(l,this.size),this.strides=Au(r)}set(r,...l){l.length===0&&(l=[0]),J(l.length===this.rank,()=>`The number of provided coordinates (${l.length}) must match the rank (${this.rank})`);const u=this.locToIndex(l);this.values[u]=r}get(...r){r.length===0&&(r=[0]);let l=0;for(const p of r){if(p<0||p>=this.shape[l]){const y=`Requested out of range element at ${r}. Buffer shape=${this.shape}`;throw new Error(y)}l++}let u=r[r.length-1];for(let p=0;poI(u))}catch(u){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return r}dataSync(){this.throwIfDisposed();const r=Bi().readSync(this.dataId);if(this.dtype==="string")try{return r.map(l=>oI(l))}catch(l){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return r}async bytes(){this.throwIfDisposed();const r=await Bi().read(this.dataId);return this.dtype==="string"?r:new Uint8Array(r.buffer)}dispose(){if(this.isDisposed)return;Bi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(r=!1){return Fc.print(this,r)}clone(){return this.throwIfDisposed(),Fc.clone(this)}toString(r=!1){const l=this.dataSync();return KR(l,this.shape,this.dtype,r)}cast(r){return this.throwIfDisposed(),Fc.cast(this,r)}variable(r=!0,l,u){return this.throwIfDisposed(),Bi().makeVariable(this,r,l,u)}}Object.defineProperty(En,Symbol.hasInstance,{value:r=>!!r&&r.data!=null&&r.dataSync!=null&&r.throwIfDisposed!=null});class eg extends En{constructor(r,l,u,p){super(r.shape,r.dtype,r.dataId,p);this.trainable=l,this.name=u}assign(r){if(r.dtype!==this.dtype)throw new Error(`dtype of the new value (${r.dtype}) and previous value (${this.dtype}) must match`);if(!Iu(r.shape,this.shape))throw new Error(`shape of the new value (${r.shape}) and previous value (${this.shape}) must match`);Bi().disposeTensor(this),this.dataId=r.dataId,Bi().incRef(this,null)}dispose(){Bi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(eg,Symbol.hasInstance,{value:r=>r instanceof En&&r.assign!=null&&r.assign instanceof Function});var tO;(function(r){r.R0="R0",r.R1="R1",r.R2="R2",r.R3="R3",r.R4="R4",r.R5="R5",r.R6="R6"})(tO||(tO={}));var cI;(function(r){r.float32="float32",r.int32="int32",r.bool="int32",r.complex64="complex64"})(cI||(cI={}));var lI;(function(r){r.float32="float32",r.int32="int32",r.bool="bool",r.complex64="complex64"})(lI||(lI={}));var hI;(function(r){r.float32="float32",r.int32="float32",r.bool="float32",r.complex64="complex64"})(hI||(hI={}));var uI;(function(r){r.float32="complex64",r.int32="complex64",r.bool="complex64",r.complex64="complex64"})(uI||(uI={}));const XJ={float32:hI,int32:cI,bool:lI,complex64:uI};function nO(r,l){if(r==="string"||l==="string"){if(r==="string"&&l==="string")return"string";throw new Error(`Can not upcast ${r} with ${l}`)}return XJ[r][l]}function Lt(r,l){if(r.dtype===l.dtype)return[r,l];const u=nO(r.dtype,l.dtype);return[r.cast(u),l.cast(u)]}function tg(r){const l=[],u=new Set;return sO(r,l,u),l}function sO(r,l,u){if(r==null)return;if(r instanceof En){l.push(r);return}if(!JJ(r))return;const p=r;for(const y in p){const g=p[y];u.has(g)||(u.add(g),sO(g,l,u))}}function JJ(r){return Array.isArray(r)||typeof r=="object"}class iO{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const r in this.registeredVariables)this.registeredVariables[r].dispose()}}class Ru{constructor(r){this.ENV=r,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new iO}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const r=this.getSortedBackends();for(let l=0;l{l.setupFunc!=null&&l.setupFunc(this.backendInstance)})}disposeRegisteredKernels(r){const l=iI(r);l.forEach(u=>{u.disposeFunc!=null&&u.disposeFunc(this.registry[r])})}initializeBackend(r){const 
l=this.registryFactory[r];if(l==null)throw new Error(`Cannot initialize backend ${r}, no registration found.`);try{const u=l.factory();if(u&&!(u instanceof i2)&&typeof u.then=="function"){const p=++this.pendingBackendInitId,y=u.then(g=>p(pthis.registryFactory[l].priority-this.registryFactory[r].priority)}initializeBackendsAndReturnBest(){const r=this.getSortedBackends();for(let l=0;lthis.startScope(u),()=>this.endScope(p),()=>(p=l(),p instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),p))}scopedRun(r,l,u){r();try{const p=u();return l(),p}catch(p){throw l(),p}}nextTensorId(){return Ru.nextTensorId++}nextVariableId(){return Ru.nextVariableId++}clone(r){const l=this.makeTensorFromDataId(r.dataId,r.shape,r.dtype),u={x:r},p=g=>({x:()=>{const I="float32",S={x:g},T={dtype:I};return H.runKernelFunc(C=>C.cast(g,I),S,null,kc,T)}}),y=[];return this.addTapeNode(this.state.activeScope.name,u,[l],p,y,{}),l}runKernel(r,l,u,p,y){const g=null,I=null;return this.runKernelFunc(g,l,I,r,u,p,y)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(r,l,u){const p=this.backend.numDataIds();let y=0;u.forEach(S=>{y+=S.dtype==="complex64"?3:1});const g=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],I=p-l-y-g;if(I>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${I} data ids) after running '${r}'`)}runKernelFunc(r,l,u,p,y,g,I){let S,T=[];const C=this.isTapeOn();p==null&&(p=this.state.activeScope!=null?this.state.activeScope.name:"");const D=this.state.numBytes,_=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let A;const B=Jf(p,this.backendName);let ne;if(B!=null)A=()=>{const P=this.backend.numDataIds();ne=B.kernelFunc({inputs:l,attrs:y,backend:this.backend});const ge=Array.isArray(ne)?ne:[ne];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,P,ge);const ae=ge.map(({dataId:Le,shape:ve,dtype:Ve})=>this.makeTensorFromDataId(Le,ve,Ve));if(C){let Le=this.getTensorsForGradient(p,l,ae);if(Le==null){I==null&&(I=[]);const ve=ae.filter((Ve,at)=>I[at]);Le=(g||[]).slice().concat(ve)}T=this.saveTensorsForBackwardMode(Le)}return ae};else{const P=ge=>{if(!C)return;T=ge.map(ae=>this.keep(this.clone(ae)))};A=()=>{const ge=this.backend.numDataIds();ne=this.tidy(()=>r(this.backend,P));const ae=Array.isArray(ne)?ne:[ne];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,ge,ae),ae}}let te;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?S=A():(te=this.profiler.profileKernel(p,l,()=>A()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(te),S=te.outputs)}),C&&this.addTapeNode(p,l,S,u,T,y),this.state.profiling&&this.state.activeProfile.kernels.push({name:p,bytesAdded:this.state.numBytes-D,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-_,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(l).map(P=>l[P]!=null?l[P].shape:null),outputShapes:S.map(P=>P.shape),kernelTimeMs:te.timeMs,extraInfo:te.extraInfo}),Array.isArray(ne)?S:S[0]}saveTensorsForBackwardMode(r){const l=r.map(u=>this.keep(this.clone(u)));return l}getTensorsForGradient(r,l,u){const p=sI(r);if(p!=null){const y=p.inputsToSave||[],g=p.outputsToSave||[];let I;p.saveAllInputs?(J(Array.isArray(l),()=>"saveAllInputs is true, expected inputs to be an array."),I=Object.keys(l).map(T=>l[T])):I=y.map(T=>l[T]);const S=u.filter((T,C)=>g[C]);return I.concat(S)}return 
null}makeTensor(r,l,u,p){if(r==null)throw new Error("Values passed to engine.makeTensor() are null");u=u||"float32",p=p||this.backend;let y=r;u==="string"&&xu(r[0])&&(y=r.map(S=>GR(S)));const g=p.write(y,l,u),I=new En(l,u,g,this.nextTensorId());if(this.incRef(I,p),u==="string"){const S=this.state.tensorInfo.get(g),T=u2(y);this.state.numBytes+=T-S.bytes,S.bytes=T}return I}makeTensorFromDataId(r,l,u,p){u=u||"float32";const y=new En(l,u,r,this.nextTensorId());return this.incRef(y,p),y}makeVariable(r,l=!0,u,p){u=u||this.nextVariableId().toString(),p!=null&&p!==r.dtype&&(r=r.cast(p));const y=new eg(r,l,u,this.nextTensorId());if(this.state.registeredVariables[y.name]!=null)throw new Error(`Variable with name ${y.name} was already registered`);return this.state.registeredVariables[y.name]=y,this.incRef(y,this.backend),y}incRef(r,l){const u=this.state.tensorInfo.has(r.dataId)?this.state.tensorInfo.get(r.dataId).refCount:0;if(this.state.numTensors++,r.dtype==="string"&&this.state.numStringTensors++,u===0){this.state.numDataBuffers++;let p=0;r.dtype!=="complex64"&&r.dtype!=="string"&&(p=r.size*h2(r.dtype)),this.state.tensorInfo.set(r.dataId,{backend:l||this.backend,dtype:r.dtype,shape:r.shape,bytes:p,refCount:0}),this.state.numBytes+=p}this.state.tensorInfo.get(r.dataId).refCount++,r instanceof eg||this.track(r)}disposeTensor(r){if(!this.state.tensorInfo.has(r.dataId))return;this.state.numTensors--,r.dtype==="string"&&this.state.numStringTensors--;const l=this.state.tensorInfo.get(r.dataId),u=l.refCount;u<=1?(r.dtype!=="complex64"&&(this.state.numBytes-=l.bytes),this.state.numDataBuffers--,l.backend.disposeData(r.dataId),this.state.tensorInfo.delete(r.dataId)):this.state.tensorInfo.get(r.dataId).refCount--}disposeVariables(){for(const r in this.state.registeredVariables){const l=this.state.registeredVariables[r];this.disposeVariable(l)}}disposeVariable(r){this.disposeTensor(r),this.state.registeredVariables[r.name]!=null&&delete this.state.registeredVariables[r.name]}memory(){const r=this.backend.memory();return r.numTensors=this.state.numTensors,r.numDataBuffers=this.state.numDataBuffers,r.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(r.unreliable=!0,r.reasons==null&&(r.reasons=[]),r.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),r}async profile(r){this.state.profiling=!0;const l=this.state.numBytes,u=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await r(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(p=>p.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-l,this.state.activeProfile.newTensors=this.state.numTensors-u;for(const p of this.state.activeProfile.kernels)p.kernelTimeMs=await p.kernelTimeMs,p.extraInfo=await p.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(r,l,u,p,y,g){const I={id:this.state.nextTapeNodeId++,kernelName:r,inputs:l,outputs:u,saved:y},S=sI(r);S!=null&&(p=S.gradFunc),p!=null&&(I.gradient=T=>(T=T.map((C,D)=>{if(C==null){const _=u[D],A=na(_.size,_.dtype);return this.makeTensor(A,_.shape,_.dtype)}return C}),p(T.length>1?T:T[0],y,g))),this.state.activeTape.push(I)}keep(r){return r.kept=!0,r}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(r){const l={track:[],name:"unnamed 
scope",id:this.state.nextScopeId++};r&&(l.name=r),this.state.scopeStack.push(l),this.state.activeScope=l}endScope(r){const l=tg(r),u=new Set(l.map(y=>y.id));for(let y=0;y{!y.kept&&y.scopeId===p.id&&this.track(y)})}gradients(r,l,u,p=!1){if(J(l.length>0,()=>"gradients() received an empty list of xs."),u!=null&&u.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${u.dtype}'`);const y=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",r));J(y instanceof En,()=>"The result y returned by f() must be a tensor.");const g=HR(this.state.activeTape,l,y);if(!p&&g.length===0&&l.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const I={};I[y.id]=u==null?ZJ(y.shape):u,qR(I,g,T=>this.tidy(T),QJ);const S=l.map(T=>I[T.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(T=>{for(const C of T.saved)C.dispose()}),this.state.activeTape=null),{value:y,grads:S}})}customGrad(r){return J(XS(r),()=>"The f passed in customGrad(f) must be a function."),(...l)=>{J(l.every(y=>y instanceof En),()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors");let u;const p={};return l.forEach((y,g)=>{p[g]=y}),this.runKernelFunc((y,g)=>(u=r(...l,g),J(u.value instanceof En,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),J(XS(u.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),u.value),p,(y,g)=>{const I=u.gradFunc(y,g),S=Array.isArray(I)?I:[I];J(S.length===l.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),J(S.every(C=>C instanceof En),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const T={};return S.forEach((C,D)=>{T[D]=()=>C}),T})}}readSync(r){const l=this.state.tensorInfo.get(r);return l.backend.readSync(r)}read(r){const l=this.state.tensorInfo.get(r);return l.backend.read(r)}async time(r){const l=rI(),u=await this.backend.time(r);return u.wallMs=rI()-l,u}track(r){return this.state.activeScope!=null&&(r.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(r)),r}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new iO;for(const r in this.registry)this.disposeRegisteredKernels(r),this.registry[r].dispose(),delete this.registry[r];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Ru.nextTensorId=0;Ru.nextVariableId=0;function ZJ(r){const l=Jm(Zt(r),"float32");return H.makeTensor(l,r,"float32")}function dI(){const r=QS();if(r._tfengine==null){const l=new m2(r);r._tfengine=new Ru(l)}return g2(r._tfengine.ENV),ZR(()=>r._tfengine),r._tfengine}const H=dI();function QJ(r,l){const u={a:r,b:l};return H.runKernelFunc((p,y)=>{const g=p.add(r,l);return y([r,l]),g},u,null,Dc)}function rO(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}const yr=Es();yr.registerFlag("DEBUG",()=>!1,r=>{r&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});yr.registerFlag("IS_BROWSER",()=>rO());yr.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");yr.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));yr.registerFlag("PROD",()=>!1);yr.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>yr.getBool("DEBUG"));yr.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);yr.registerFlag("IS_TEST",()=>!1);function br(r,l){let u=r;if(Os(r))return l==="string"?[]:[r.length];if(!Array.isArray(r))return[];const p=[];for(;Array.isArray(u)||Os(u)&&l!=="string";)p.push(u.length),u=u[0];return Array.isArray(r)&&Es().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&oO(r,p,[]),p}function oO(r,l,u){if(u=u||[],!Array.isArray(r)&&!Os(r)){J(l.length===0,()=>`Element arr[${u.join("][")}] is a primitive, but should be an array/TypedArray of ${l[0]} elements`);return}J(l.length>0,()=>`Element arr[${u.join("][")}] should be a primitive, but is an array of ${r.length} elements`),J(r.length===l[0],()=>`Element arr[${u.join("][")}] should have ${l[0]} elements, but has ${r.length} elements`);const p=l.slice(1);for(let y=0;y=0&&(y=p),aO(p,y,l,u),r==null||!Os(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string"){const T=r==null?"null":r.constructor.name;throw new Error(`Argument '${l}' passed to '${u}' must be a Tensor or TensorLike, but got '${T}'`)}const g=br(r,y);!Os(r)&&!Array.isArray(r)&&(r=[r]);const I=!0,S=y!=="string"?Zf(r,y):ta(r,[],I);return H.makeTensor(S,g,y)}function ng(r,l,u,p="numeric"){if(!Array.isArray(r))throw new Error(`Argument ${l} passed to ${u} must be a \`Tensor[]\` or \`TensorLike[]\``);const y=r;return y.map((g,I)=>M(g,`${l}[${I}]`,u),p)}const cO="__op";function V(r){const l=Object.keys(r);if(l.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${l.length} keys.`);let u=l[0];const p=r[u];u.endsWith("_")&&(u=u.substring(0,u.length-1)),u=u+cO;const y=(...g)=>{H.startScope(u);try{const I=p(...g);return Qm(I)&&console.error("Cannot return a Promise inside of tidy."),H.endScope(I),I}catch(I){throw H.endScope(null),I}};return Object.defineProperty(y,"name",{value:u,configurable:!0}),y}function e9(r,l){const u=M(r,"real","complex"),p=M(l,"imag","complex");tn(u.shape,p.shape,`real and imag shapes, ${u.shape} and ${p.shape}, must match in call to tf.complex().`);const y=I=>I.complex(u,p),g={real:u,imag:p};return H.runKernelFunc(y,g,null,k2)}const Mi=V({complex_:e9});function Pi(r,l,u,p){if(p==null&&(p=Tu(r)),p==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Os(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(l!=null){Zm(l);const y=Zt(l),g=Zt(u);J(y===g,()=>`Based on the provided shape, [${l}], the tensor should have ${y} values but has ${g}`);for(let I=0;I`Error creating a new Tensor. Inferred shape (${u}) does not match the provided shape (${l}). 
`)}}return!Os(r)&&!Array.isArray(r)&&(r=[r]),l=l||u,r=p!=="string"?Zf(r,p):ta(r,[],!0),H.makeTensor(r,l,p)}function pI(r,l,u){const p=br(r,u);return Pi(r,l,p,u)}function Ou(r,l="float32",u){return l=l||"float32",Zm(r),new JR(r,l,u)}function t9(r,l){const u=M(r,"x","cast");if(!l2(l))throw new Error(`Failed to cast to unknown dtype ${l}`);if(l==="string"&&u.dtype!=="string"||l!=="string"&&u.dtype==="string")throw new Error("Only strings can be casted to strings");const p={x:u},y={dtype:l};return H.runKernelFunc(g=>g.cast(u,l),p,null,kc,y)}const Ie=V({cast_:t9});function n9(r){const l=M(r,"x","clone",null),u=()=>H.makeTensorFromDataId(l.dataId,l.shape,l.dtype),p={x:l};return H.runKernelFunc(u,p,null,yf)}const pi=V({clone_:n9});function mI(r,l=!1){console.log(r.toString(l))}dI();const s9={buffer:Ou,cast:Ie,clone:pi,print:mI};QR(s9);function i9(r,l){const u=M(r,"x","reshape",null),p={x:u},y={shape:l},g=(I,S)=>(l=r2(l,u.size),J(u.size===Zt(l),()=>"new shape and old shape must have the same number of elements."),S([u]),I.reshape(u,l));return H.runKernelFunc(g,p,null,Rf,y)}const re=V({reshape_:i9});function r9(r,l,u=!1,p=!1){let y=M(r,"a","matMul"),g=M(l,"b","matMul");[y,g]=Lt(y,g);const I=(C,D)=>{D([y,g]);const _=u?y.shape[y.rank-2]:y.shape[y.rank-1],A=p?g.shape[g.rank-1]:g.shape[g.rank-2],B=u?y.shape[y.rank-1]:y.shape[y.rank-2],ne=p?g.shape[g.rank-2]:g.shape[g.rank-1],te=y.shape.slice(0,-2),P=g.shape.slice(0,-2),ge=Zt(te),ae=Zt(P),Le=ge===ae||ge===1||ae===1;J(y.rank>=2&&g.rank>=2&&Le,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${te}) and (${P}).`),J(_===A,()=>`Error in matMul: inner shapes (${_}) and (${A}) of Tensors with shapes ${y.shape} and ${g.shape} and transposeA=${u} and transposeB=${p} must match.`);const ve=ge>ae?te:P,Ve=ve.concat([B,ne]),at=u?re(y,[ge,_,B]):re(y,[ge,B,_]),pt=p?re(g,[ae,ne,A]):re(g,[ae,A,ne]),$t=C.batchMatMul(at,pt,u,p);return re($t,Ve)},S={a:y,b:g},T={transposeA:u,transposeB:p};return H.runKernelFunc(I,S,null,tf,T)}const yn=V({matMul_:r9});function o9(r,l){const u=M(r,"x","transpose");if(l==null&&(l=u.shape.map((g,I)=>I).reverse()),J(u.rank===l.length,()=>`Error in transpose: rank of input ${u.rank} must match length of perm ${l}.`),l.forEach(g=>{J(g>=0&&g`All entries in 'perm' must be between 0 and ${u.rank-1} but got ${l}`)}),u.rank<=1)return u.clone();const p={x:u},y={perm:l};return H.runKernelFunc(g=>g.transpose(u,l),p,null,Hf,y)}const xn=V({transpose_:o9});function fI(r,l,u){if(Oc(r),l!=null&&l.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const p=br(r,u);if(p.length!==3&&p.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return Pi(r,l,p,u)}const gI={};Pm(gI,{fromPixels:()=>l9,toPixels:()=>c9});let _c;function a9(r,l=3){if(l>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(r==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let u=!1,p=!1,y=!1,g=!1,I=!1;if(r.data instanceof Uint8Array)u=!0;else if(typeof ImageData!="undefined"&&r instanceof ImageData)p=!0;else if(typeof HTMLVideoElement!="undefined"&&r instanceof HTMLVideoElement)y=!0;else if(typeof HTMLImageElement!="undefined"&&r instanceof HTMLImageElement)g=!0;else if(r.getContext!=null)I=!0;else throw new Error(`pixels passed to 
tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`);if(y){const B=2;if(y&&r.readyState element.")}const S=Jf(tI,H.backendName);if(S!=null){const B={pixels:r},ne={numChannels:l};return H.runKernel(tI,B,ne)}const[T,C]=y?[r.videoWidth,r.videoHeight]:[r.width,r.height];let D;I?D=r.getContext("2d").getImageData(0,0,T,C).data:p||u?D=r.data:(g||y)&&(_c==null&&(_c=document.createElement("canvas").getContext("2d")),_c.canvas.width=T,_c.canvas.height=C,_c.drawImage(r,0,0,T,C),D=_c.getImageData(0,0,T,C).data);let _;if(l===4)_=new Int32Array(D);else{const B=T*C;_=new Int32Array(B*l);for(let ne=0;ne4||g===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${g}`);if(u.dtype!=="float32"&&u.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${u.dtype}. Please use float32 or int32 tensors.`);const I=await u.data(),S=u.dtype==="float32"?255:1,T=new Uint8ClampedArray(y*p*4);for(let C=0;C1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${B}.`)}else if(u.dtype==="int32"&&(B<0||B>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${B}.`);g===1?(D[0]=B*S,D[1]=B*S,D[2]=B*S):D[A]=B*S}const _=C*4;T[_+0]=Math.round(D[0]),T[_+1]=Math.round(D[1]),T[_+2]=Math.round(D[2]),T[_+3]=Math.round(D[3])}if(l!=null){l.width=y,l.height=p;const C=l.getContext("2d"),D=new ImageData(T,y,p);C.putImageData(D,0,0)}return u!==r&&u.dispose(),T}const l9=V({fromPixels_:a9});function lO(r,l,u){const p=r.shape.length;J(p===l.length,()=>`Error in slice${p}D: Length of begin ${l} must match the rank of the array (${p}).`),J(p===u.length,()=>`Error in slice${p}D: Length of size ${u} must match the rank of the array (${p}).`);for(let y=0;y`Error in slice${p}D: begin[${y}] + size[${y}] (${l[y]+u[y]}) would overflow input.shape[${y}] (${r.shape[y]})`)}function sg(r,l,u){let p;const y=r.shape.length;typeof l=="number"?p=[l,...new Array(y-1).fill(0)]:l.length{J(I!==-1,()=>"slice() does not support negative begin indexing.")});let g;return u==null?g=new Array(y).fill(-1):typeof u=="number"?g=[u,...new Array(y-1).fill(-1)]:u.lengthI>=0?I:(J(I===-1,()=>`Negative size values should be exactly -1 but got ${I} for the slice() size at index ${S}.`),r.shape[S]-p[S])),[p,g]}function h9(r){Es().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(r+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}eO(h9);function hO(r,l){return H.tidy(r,l)}function uO(r){const l=tg(r);l.forEach(u=>u.dispose())}function u9(r,l){let u=M(r,"a","add"),p=M(l,"b","add");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.add(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Dc)}const vt=V({add_:u9});function d9(r,l){let u=M(r,"a","floorDiv"),p=M(l,"b","floorDiv");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.floorDiv(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,mf)}const yI=V({floorDiv_:d9});function p9(r,l){let u=M(r,"a","div"),p=M(l,"b","div");if([u,p]=Lt(u,p),u.dtype==="int32"&&p.dtype==="int32")return yI(u,p);const y=(S,T)=>{const C=S.realDivide(u,p);return T([u,p]),C},g={a:u,b:p},I={};return H.runKernelFunc(y,g,null,uf,I)}const Me=V({div_:p9});function m9(r,l){let u=M(r,"a","mul"),p=M(l,"b","mul");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.multiply(u,p);return S([u,p]),T},g={a:u,b:p};return 
H.runKernelFunc(y,g,null,Tf)}const le=V({mul_:m9});function f9(r){const l=M(r,"x","abs"),u={x:l};return H.runKernelFunc((p,y)=>(y([l]),l.dtype==="complex64"?p.complexAbs(l):p.abs(l)),u,null,ef)}const zn=V({abs_:f9});function g9(r,l){for(let u=0;ur[g]);return[u,y]}function rs(r,l){const u=l.map(p=>1);return y9(r,u,l)}function zi(r,l){if(g9(r,l))return null;const u=[];for(let p=0;pu.push(p)),u}function Wc(r){return r.map((l,u)=>[u,l]).sort((l,u)=>l[1]-u[1]).map(l=>l[0])}function io(r,l){const u=[];for(let p=l-r;p`The output # of rows (${S}) must be an integer. Change the stride and/or zero pad parameters`);const T=sa((I-l+2*p)/u+1,y);return J(nn(T),()=>`The output # of columns (${T}) must be an integer. Change the stride and/or zero pad parameters`),[S,T]}function S9(r,l,u,p,y,g){y==null&&(y=pO(r,l,p));const I=r[0],S=r[1],T=r[2],C=sa((I-l+2*y)/p+1,g);J(nn(C),()=>`The output # of depths (${C}) must be an integer. Change the stride and/or zero pad parameters`);const D=sa((S-l+2*y)/p+1,g);J(nn(D),()=>`The output # of rows (${D}) must be an integer. Change the stride and/or zero pad parameters`);const _=sa((T-l+2*y)/p+1,g);return J(nn(_),()=>`The output # of columns (${_}) must be an integer. Change the stride and/or zero pad parameters`),[C,D,_,u]}function pO(r,l,u,p=1){const y=$c(l,p);return Math.floor((r[0]*(u-1)-u+y)/2)}function ig(r){return typeof r=="number"?[r,r,r]:r.length===2?[r[0],r[1],1]:r}function bI(r){return typeof r=="number"?[r,r,r]:r}function $c(r,l){return l<=1?r:r+(r-1)*(l-1)}function b9(r,l,u,p,y,g,I,S,T){let C,D,_;if(typeof r=="number"){const A=r===0?"VALID":"NUMBER";C={top:r,bottom:r,left:r,right:r,type:A};const B=L9([l,u],g,p,r,S);D=B[0],_=B[1]}else if(r==="same"){D=Math.ceil(l/p),_=Math.ceil(u/y);const A=Math.max(0,(D-1)*p+g-l),B=Math.max(0,(_-1)*y+I-u),ne=Math.floor(A/2),te=A-ne,P=Math.floor(B/2),ge=B-P;C={top:ne,bottom:te,left:P,right:ge,type:"SAME"}}else if(r==="valid")C={top:0,bottom:0,left:0,right:0,type:"VALID"},D=Math.ceil((l-g+1)/p),_=Math.ceil((u-I+1)/y);else if(typeof r=="object"){const A=T==="channelsLast"?r[1][0]:r[2][0],B=T==="channelsLast"?r[1][1]:r[2][1],ne=T==="channelsLast"?r[2][0]:r[3][0],te=T==="channelsLast"?r[2][1]:r[3][1],P=A===0&&B===0&&ne===0&&te===0?"VALID":"EXPLICIT";C={top:A,bottom:B,left:ne,right:te,type:P},D=sa((l-g+A+B)/p+1,S),_=sa((u-I+ne+te)/y+1,S)}else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:C,outHeight:D,outWidth:_}}function w9(r,l,u,p,y,g,I,S,T,C,D){let _,A,B,ne;if(typeof r=="number"){const te=r===0?"VALID":"NUMBER";_={top:r,bottom:r,left:r,right:r,front:r,back:r,type:te};const P=S9([l,u,p,1],S,1,y,r,D);A=P[0],B=P[1],ne=P[2]}else if(r==="same"){A=Math.ceil(l/y),B=Math.ceil(u/g),ne=Math.ceil(p/I);const te=(A-1)*y+S-l,P=(B-1)*g+T-u,ge=(ne-1)*I+C-p,ae=Math.floor(te/2),Le=te-ae,ve=Math.floor(P/2),Ve=P-ve,at=Math.floor(ge/2),pt=ge-at;_={top:ve,bottom:Ve,left:at,right:pt,front:ae,back:Le,type:"SAME"}}else if(r==="valid")_={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},A=Math.ceil((l-S+1)/y),B=Math.ceil((u-T+1)/g),ne=Math.ceil((p-C+1)/I);else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:_,outDepth:A,outHeight:B,outWidth:ne}}function sa(r,l){if(!l)return r;switch(l){case"round":return Math.round(r);case"ceil":return Math.ceil(r);case"floor":return Math.floor(r);default:throw new Error(`Unknown roundingMode ${l}`)}}function ro(r){const[l,u,p]=ig(r);return l===1&&u===1&&p===1}function oo(r,l){return ro(r)||ro(l)}function 
Uc(r){if(r==="NHWC")return"channelsLast";if(r==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${r}`)}function mO(r,l){const u=r[0].length;r.forEach((y,g)=>{J(y.length===u,()=>`Error in concat${u}D: rank of tensors[${g}] must be the same as the rank of the rest (${u})`)}),J(l>=0&&l`Error in concat${u}D: axis must be between 0 and ${u-1}.`);const p=r[0];r.forEach((y,g)=>{for(let I=0;I`Error in concat${u}D: Shape of tensors[${g}] (${y}) does not match the shape of the rest (${p}) along the non-concatenated axis ${g}.`)})}function fO(r,l){const u=r[0].slice();for(let p=1;p=1,()=>"Pass at least one tensor to concat");let u=ng(r,"tensors","concat");u[0].dtype==="complex64"&&u.forEach(I=>{if(I.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${I.dtype}. `)});const p=(I,S)=>{const T=ft(l,u[0].shape)[0],C=fO(u.map(A=>A.shape),T);if(Zt(C)===0)return pI([],C);if(u=u.filter(A=>A.size>0),u.length===1)return u[0];const D=u.map(A=>A.shape);mO(D,T);const _=I.concat(u,T);return S(u),_},y=u,g={axis:l};return H.runKernelFunc(p,y,null,rf,g)}const Tn=V({concat_:I9});function x9(r){const l=M(r,"x","sigmoid"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sigmoid(l);return y([g]),g},u,null,Uf)}const wI=V({sigmoid_:x9});function T9(r,l,u){const p=M(r,"x","slice");if(p.rank===0)throw new Error("Slicing scalar is not possible");const y=(S,T)=>{const[C,D]=sg(p,l,u);return lO(p,C,D),T([p]),S.slice(p,C,D)},g={x:p},I={begin:l,size:u};return H.runKernelFunc(y,g,null,_f,I)}const Tt=V({slice_:T9});function A9(r,l,u){const p=M(r,"x","batchToSpaceND"),y=l.reduce((T,C)=>T*C);J(p.rank>=1+l.length,()=>`input rank is ${p.rank} but should be > than blockShape.length ${l.length}`),J(u.length===l.length,()=>`crops.length is ${u.length} but should be equal to blockShape.length ${l.length}`),J(p.shape[0]%y===0,()=>`input tensor batch is ${p.shape[0]} but is not divisible by the product of the elements of blockShape ${l.join(" * ")} === ${y}`);const g=T=>T.batchToSpaceND(p,l,u),I={x:p},S={blockShape:l,crops:u};return H.runKernelFunc(g,I,null,nf,S)}const LI=V({batchToSpaceND_:A9});function v9(r,l){let u=M(r,"broadcastTo","x");const p=u.shape;if(l.some(D=>!(D>0)||D%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${l}].`);if(l.lengthu.rank){const D=u.shape.slice();for(;D.length=0;D--)if(y[D]===l[D])g[D]=1;else if(u.shape[D]!==1)throw new Error(`broadcastTo(): [${p}] cannot be broadcast to [${l}].`);const I=g.map((D,_)=>D>1?_:-1).filter(D=>D>=0);if(I.length===0)return pi(u);const S=D=>D.tile(u,g),T={x:u},C={shape:l,inputShape:y};return H.runKernelFunc(S,T,null,sf,C)}const ag=V({broadcastTo_:v9});function N9(r,l,u,p,y="NHWC",g=[1,1],I){const S=M(r,"x","conv2d"),T=M(l,"filter","conv2d");let C=S,D=!1;S.rank===3&&(D=!0,C=re(S,[1,S.shape[0],S.shape[1],S.shape[2]])),J(C.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${C.rank}.`),J(T.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${T.rank}.`),I!=null&&J(nn(p),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${p}.`);const _=y==="NHWC"?C.shape[3]:C.shape[1];J(_===T.shape[2],()=>`Error in conv2d: depth of input (${_}) must match input depth for filter ${T.shape[2]}.`),J(oo(u,g),()=>`Error in conv2D: Either strides or dilations must be 1. 
Got strides ${u} and dilations '${g}'`);const A=(P,ge)=>{const ae=Uc(y),Le=mi(C.shape,T.shape,u,g,p,I,!1,ae),ve=P.conv2d(C,T,Le);return ge([C,T]),ve},B={x:C,filter:T},ne={strides:u,pad:p,dataFormat:y,dilations:g,dimRoundingMode:I},te=H.runKernelFunc(A,B,null,of,ne);return D?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const SI=V({conv2d_:N9});function C9(r,l,u,p,y,g="NHWC",I){J(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let S=r,T=l,C=!1;l.rank===3&&(C=!0,T=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]),S=[1,r[0],r[1],r[2]]),J(S.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${S.length}.`),J(T.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${T.rank}`),J(u.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${u.rank}`);const D=g==="NHWC"?S[3]:S[1],_=g==="NHWC"?T.shape[3]:T.shape[1];J(D===u.shape[2],()=>`Error in conv2dDerInput: depth of input (${D}) must match input depth for filter ${u.shape[2]}.`),J(_===u.shape[3],()=>`Error in conv2dDerInput: depth of output (${_}) must match output depth for filter ${u.shape[3]}.`),I!=null&&J(nn(y),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${I} but got pad ${y}.`);const A=(P,ge)=>{const ae=1,Le=Uc(g),ve=mi(S,u.shape,p,ae,y,I,!1,Le),Ve=P.conv2dDerInput(T,u,ve);return ge([T,u]),Ve},B={dy:T,filter:u},ne={strides:p,pad:y,dataFormat:g,dimRoundingMode:I,inputShape:S},te=H.runKernelFunc(A,B,null,af,ne);return C?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const gO=V({conv2DBackpropInput_:C9});function R9(r,l,u,p,y){J(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let g=r,I=l,S=!1;l.rank===4&&(S=!0,I=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),g=[1,r[0],r[1],r[2],r[3]]);const T=g[4],C=I.shape[4];J(g.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${g.length}.`),J(I.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${I.rank}`),J(u.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${u.rank}`),J(T===u.shape[3],()=>`Error in conv3dDerInput: depth of input (${T}) must match input depth for filter ${u.shape[3]}.`),J(C===u.shape[4],()=>`Error in conv3dDerInput: depth of output (${C}) must match output depth for filter ${u.shape[4]}.`);const D=ne=>{const te=1,P=Eu(g,u.shape,p,te,y);return ne.conv3dDerInput(I,u,P)},_={dy:I,filter:u},A={pad:y,strides:p,inputShape:g},B=H.runKernelFunc(D,_,null,$2,A);return S?re(B,[B.shape[1],B.shape[2],B.shape[3],B.shape[4]]):B}const yO=V({conv3DBackpropInput_:R9});function O9(r){const l=M(r,"x","cos"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.cos(l);return y([l]),g},u,null,cf)}const Du=V({cos_:O9});function E9(r){const l=M(r,"x","cosh"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.cosh(l);return y([l]),g},u,null,lf)}const II=V({cosh_:E9});function D9(r,l=0,u=!1,p=!1){const y=M(r,"x","cumsum"),g=(T,C)=>{const D=zi([l],y.rank);let _=y;D!=null&&(_=xn(y,D));const A=io(1,y.rank)[0];let B=T.cumsum(_,A,u,p);if(C([y]),D!=null){const ne=Wc(D);B=xn(B,ne)}return B},I={x:y},S={axis:l,exclusive:u,reverse:p};return H.runKernelFunc(g,I,null,hf,S)}const xI=V({cumsum_:D9});function Ot(r,l){const u=[];for(let p=0;p1)&&u.unshift(g)}return u}function ot(r,l){const u=[],p=Math.max(r.length,l.length);for(let y=0;yI.equal(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,j2)}const TI=V({equal_:k9});function F9(r,l,u){const 
p=M(l,"a","where"),y=M(u,"b","where"),g=M(r,"condition","where","bool"),I=ot(p.shape,y.shape),S=ag(p,I),T=ag(y,I);g.rank===1&&J(g.shape[0]===p.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),g.rank!==1&&tn(g.shape,T.shape,"Error in where: ");const C=(_,A)=>{const B=_.select(g,S,T);return A([g]),B},D={condition:g,t:S,e:T};return H.runKernelFunc(C,D,null,Ff)}const Vn=V({where_:F9});function _9(r){const l=M(r,"x","zerosLike"),u={x:l};return H.runKernelFunc(p=>p.zerosLike(l),u,null,Kf)}const Ke=V({zerosLike_:_9});function W9(r){const l=M(r,"x","exp"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.exp(l);return y([g]),g},u,null,df)}const Gn=V({exp_:W9});function $9(r,l=0){const u=null,p=M(r,"x","expandDims",u);J(l<=p.rank,()=>"Axis must be <= rank of the tensor");const y=p.shape.slice();return l<0&&(J(-(p.rank+1)<=l,()=>`Axis must be in the interval [${-(p.rank+1)}, ${p.rank}]`),l=p.rank+l+1),y.splice(l,0,1),re(p,y)}const Ds=V({expandDims_:$9});function U9(r,l){const u=null,p=M(r,"x","tile",u);J(p.rank===l.length,()=>`Error in transpose: rank of input ${p.rank} must match length of reps ${l}.`);const y=(T,C)=>{const D=T.tile(p,l);return C([p]),D},g=[p],I={x:p},S={reps:l};return H.runKernelFunc(y,I,null,Yf,S,g)}const ia=V({tile_:U9});function B9(r,l,u,p="float32"){l==null&&(l=r);const y=Ou([r,l],p),g=r<=l?r:l;for(let S=0;Sy.fill(r,l,u),{},null,J2,p)}function M9(r){const l=M(r,"x","floor"),u={x:l};return H.runKernelFunc(p=>p.floor(l),u,null,pf)}const NI=V({floor_:M9});function bO(r,l,u){const p=r.shape[u],y=[];let g=1,I=1;for(let S=0;S{const D=ft(u,p.shape)[0],_=bO(p,y,D),A=T.gather(p,re(y,[y.size]),D);return C([p,y]),re(A,_.outputShape)};return H.runKernelFunc(S,g,null,ff,I)}const CI=V({gather_:P9});function z9(r,l){let u=M(r,"a","greater"),p=M(l,"b","greater");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.greater(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,eR)}const fi=V({greater_:z9});function V9(r,l){let u=M(r,"a","greaterEqual"),p=M(l,"b","greaterEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.greaterEqual(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,gf)}const wr=V({greaterEqual_:V9});function G9(r){const l=M(r,"input","imag"),u=y=>y.imag(l),p={input:l};return H.runKernelFunc(u,p,null,nR)}const ku=V({imag_:G9});function Y9(r,l){let u=M(r,"a","maximum"),p=M(l,"b","maximum");[u,p]=Lt(u,p),u.dtype==="bool"&&(u=Ie(u,"int32"),p=Ie(p,"int32")),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.maximum(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Sf)}const RI=V({maximum_:Y9});function Fe(r,l){if((Os(r)&&l!=="string"||Array.isArray(r))&&l!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(l==="string"&&Os(r)&&!(r instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const u=[],p=[];return Pi(r,u,p,l)}function H9(r,l){let u=M(r,"a","less"),p=M(l,"b","less");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.less(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,oR)}const OI=V({less_:H9});function q9(r,l){let u=M(r,"a","lessEqual"),p=M(l,"b","lessEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.lessEqual(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,aR)}const Lr=V({lessEqual_:q9});function j9(r){const l=M(r,"x","log"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.log(l);return y([l]),g},u,null,bf)}const ao=V({log_:j9});function K9(r){const 
l=M(r,"x","log1p"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.log1p(l);return y([l]),g},u,null,wf)}const EI=V({log1p_:K9});function cg(r){return H.customGrad(r)}function X9(r){const l=M(r,"x","neg"),u={x:l};return H.runKernelFunc(p=>p.neg(l),u,null,Af)}const It=V({neg_:X9});function J9(r,l=null,u=!1){const p=M(r,"x","max"),y=(S,T)=>{const C=ft(l,p.shape);let D=C;const _=zi(D,p.rank);let A=p;_!=null&&(A=xn(p,_),D=io(D.length,A.rank));const B=S.max(A,D);_!=null&&A.dispose();let ne=B;if(u){const te=rs(ne.shape,ft(l,p.shape));ne=re(ne,te),B.dispose()}return T([p,ne]),ne},g={x:p},I={reductionIndices:l,keepDims:u};return H.runKernelFunc(y,g,null,Lf,I)}const ra=V({max_:J9});function Z9(r,l){let u=M(r,"a","sub"),p=M(l,"b","sub");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.subtract(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Gf)}const Be=V({sub_:Z9});function Q9(r,l=null,u=!1){let p=M(r,"x","sum");p.dtype==="bool"&&(p=Ie(p,"int32"));const y=(S,T)=>{T([p]);const C=ft(l,p.shape),D=zi(C,p.rank);let _=C,A=p;D!=null&&(A=xn(p,D),_=io(_.length,p.rank));let B=S.sum(A,_);if(u){const ne=rs(B.shape,C);B=re(B,ne)}return B},g={x:p},I={axis:l,keepDims:u};return H.runKernelFunc(y,g,null,Mf,I)}const _e=V({sum_:Q9});function eZ(r,l=null,u=!1){const p=M(r,"x","logSumExp"),y=ft(l,p.shape),g=ra(p,y,!0),I=Be(p,g),S=Gn(I),T=_e(S,y),C=ao(T),D=vt(re(g,C.shape),C);if(u){const _=rs(D.shape,y);return re(D,_)}return D}const DI=V({logSumExp_:eZ});function tZ(r,l){const u=M(r,"a","logicalAnd","bool"),p=M(l,"b","logicalAnd","bool");ot(u.shape,p.shape);const y={a:u,b:p};return H.runKernelFunc(g=>g.logicalAnd(u,p),y,null,cR)}const oa=V({logicalAnd_:tZ});function nZ(r){const l=M(r,"x","logicalNot","bool"),u={x:l};return H.runKernelFunc(p=>p.logicalNot(l),u,null,lR)}const kI=V({logicalNot_:nZ});function ks(r,l="float32"){if(l==="complex64"){const p=ks(r,"float32"),y=ks(r,"float32");return Mi(p,y)}const u=na(Zt(r),l);return H.makeTensor(u,r,l)}function Vi(r,l="float32"){if(l==="complex64"){const p=Vi(r,"float32"),y=ks(r,"float32");return Mi(p,y)}const u=Jm(Zt(r),l);return H.makeTensor(u,r,l)}function sZ(r,l=null,u=!1){const p=M(r,"x","mean"),y=ft(l,p.shape),g=dO(p.shape,y),I=g[1],S=Zt(I),T={x:p},C={axis:l,keepDims:u},D=()=>{const A=Fe(S),B=A.dtype===p.dtype?p:Ie(p,A.dtype),ne=Me(B,A);return _e(ne,l,u)},_=cg(A=>{const B=H.runKernelFunc(D,T,null,yR,C),ne=te=>{const P=A.shape.slice();y.forEach(Le=>{P[Le]=1});const ge=re(te,P),ae=Me(le(ge,Vi(A.shape,"float32")),S);return ae};return{value:B,gradFunc:ne}});return _(p)}const FI=V({mean_:sZ});function iZ(r,l=null,u=!1){const p=M(r,"x","min"),y=(S,T)=>{const C=ft(l,p.shape);let D=C;const _=zi(D,p.rank);let A=p;_!=null&&(A=xn(p,_),D=io(D.length,p.rank));const B=S.min(A,D);_!=null&&A.dispose();let ne=B;if(u){const te=rs(ne.shape,C);ne=re(B,te),B.dispose()}return T([p,ne]),ne},g={x:p},I={axis:l,keepDims:u};return H.runKernelFunc(y,g,null,If,I)}const lg=V({min_:iZ});function rZ(r,l){let u=M(r,"a","minimum"),p=M(l,"b","minimum");[u,p]=Lt(u,p),u.dtype==="bool"&&(u=Ie(u,"int32"),p=Ie(p,"int32")),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.minimum(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,xf)}const _I=V({minimum_:rZ});function oZ(r){const l=M(r,"x","square"),u={},p=[l],y=[];return H.runKernelFunc((g,I)=>(I([l]),g.square(l)),{x:l},null,"Square",u,p,y)}const gt=V({square_:oZ});function aZ(r,l){let u=M(r,"a","notEqual"),p=M(l,"b","notEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.notEqual(u,p),g={a:u,b:p};return 
H.runKernelFunc(y,g,null,LR)}const WI=V({notEqual_:aZ});function cZ(r){const l=M(r,"input","real"),u=y=>y.real(l),p={input:l};return H.runKernelFunc(u,p,null,CR)}const Bc=V({real_:cZ});function lZ(r,l,u=0){const p=M(r,"x","pad");if(p.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const y=(S,T)=>(T([p]),S.pad(p,l,u)),g={paddings:l,constantValue:u},I={x:p};return H.runKernelFunc(y,I,null,vf,g)}const $I=V({pad_:lZ});function hZ(r,l,u){const p=M(r,"x","spaceToBatchND");J(p.rank>=1+l.length,()=>`input rank ${p.rank} should be > than [blockShape] ${l.length}`),J(u.length===l.length,()=>`paddings.shape[0] ${u.length} must be equal to [blockShape] ${l.length}`),J(p.shape.reduce((S,T,C)=>C>0&&C<=l.length?S&&(T+u[C-1][0]+u[C-1][1])%l[C-1]===0:S,!0),()=>`input spatial dimensions ${p.shape.slice(1)} with paddings ${u.toString()} must be divisible by blockShapes ${l.toString()}`);const y=S=>S.spaceToBatchND(p,l,u),g={x:p},I={blockShape:l,paddings:u};return H.runKernelFunc(y,g,null,Pf,I)}const UI=V({spaceToBatchND_:hZ});function uZ(r,l){let u=M(r,"base","pow"),p=M(l,"exp","pow");[u,p]=Lt(u,p);const y={a:u,b:p},g=(I,S)=>{const T=I.pow(u,p);return S([u,p,T]),T};return H.runKernelFunc(g,y,null,Nf)}const aa=V({pow_:uZ});function co(r,l){Oc(r);const u=br(r,l);if(u.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");const p=null;return Pi(r,p,u,l)}function hg(r,l,u=1,p="float32"){if(u===0)throw new Error("Cannot have a step of zero");const y=()=>{const I=r===l,S=r1;if(I||S||T)return ks([0],p);const C=Math.abs(Math.ceil((l-r)/u)),D=na(C,p);l(g([l]),l.dtype==="bool"?Ie(l,"int32"):y.relu(l)),p={x:l};return H.runKernelFunc(u,p,null,Cf)}const Fu=V({relu_:dZ});function pZ(r,l){const u=M(r,"x","reverse"),p=I=>{const S=ft(l,u.shape);if(u.rank===0)return pi(u);const T=I.reverse(u,S);return re(T,u.shape)},y={x:u},g={dims:l};return H.runKernelFunc(p,y,null,Df,g)}const Mc=V({reverse_:pZ});function mZ(r){const l=M(r,"x","rsqrt"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.rsqrt(l);return y([l]),g},u,null,kf)}const BI=V({rsqrt_:mZ});function fZ(r){const l=M(r,"x","sin"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sin(l);return y([l]),g},u,null,Wf)}const MI=V({sin_:fZ});function gZ(r){const l=M(r,"x","sinh"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sinh(l);return y([l]),g},u,null,$f)}const PI=V({sinh_:gZ});function yZ(r){J(r.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],y=r.size/p,g=r.as2D(y,p),I=u.fft(g);return I.reshape(r.shape)},l,null,X2)}const _u=V({fft_:yZ});function bZ(r){J(r.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],y=r.size/p,g=re(r,[y,p]),I=u.ifft(g);return re(I,r.shape)},l,null,tR)}const Pc=V({ifft_:bZ});function wZ(r){const l=r.shape[r.shape.length-1],u=r.size/l;let p;if(l<=2){const y=re(r,[u,l]);p=Pc(y)}else{const y=[u,2*(l-1)],g=re(Bc(r),[u,l]),I=re(ku(r),[u,l]),S=Mc(Tt(g,[0,1],[u,l-2]),1),T=le(Mc(Tt(I,[0,1],[u,l-2]),1),Fe(-1)),C=Tn([g,S],1),D=Tn([I,T],1),_=re(Mi(C,D),[y[0],y[1]]);p=Pc(_)}if(p=Bc(p),r.rank===3&&r.shape[0]!==0){const y=p,g=r.shape[0];p=re(p,[g,p.shape[0]/g,p.shape[1]]),y.dispose()}return p}const zI=V({irfft_:wZ});function wO(r,l,u=0){let p=[];if(typeof l=="number")J(r.shape[u]%l===0,()=>"Number of splits must evenly divide the axis."),p=new 
Array(l).fill(r.shape[u]/l);else{const y=l.reduce((I,S)=>(S===-1&&(I+=1),I),0);J(y<=1,()=>"There should be only one negative value in split array.");const g=l.indexOf(-1);if(g!==-1){const I=l.reduce((S,T)=>T>0?S+T:S);l[g]=r.shape[u]-I}J(r.shape[u]===l.reduce((I,S)=>I+S),()=>"The sum of sizes must match the size of the axis dimension."),p=l}return p}function LZ(r,l,u=0){const p=M(r,"x","split"),y=(S,T)=>{const C=ft(u,p.shape)[0],D=wO(p,l,C);return S.split(p,D,C)},g={x:p},I={numOrSizeSplits:l,axis:u};return H.runKernelFunc(y,g,null,zf,I)}const lo=V({split_:LZ});function SZ(r,l){J(r.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${r.dtype}`);let u=r.shape[r.shape.length-1];const p=r.size/u;let y;if(l!=null&&l0),te=r.shape.map(P=>P);te[r.shape.length-1]=l,y=Tt(r,ne,te),u=l}else if(l!=null&&l>u){const ne=r.shape.map(te=>te);ne[r.shape.length-1]=l-u,y=Tn([r,ks(ne)],r.shape.length-1),u=l}else y=r;const g=Ke(y),I=re(Mi(y,g),[p,u]),S=_u(I),T=Math.floor(u/2)+1,C=Bc(S),D=ku(S),_=lo(C,[T,u-T],C.shape.length-1),A=lo(D,[T,u-T],D.shape.length-1),B=y.shape.slice();return B[y.shape.length-1]=T,re(Mi(_[0],A[0]),B)}const Wu=V({rfft_:SZ});function IZ(r){const l=M(r,"x","sqrt"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sqrt(l);return y([l]),g},u,null,Bf)}const fs=V({sqrt_:IZ});function xZ(r,l){let u=M(r,"a","squaredDifference"),p=M(l,"b","squaredDifference");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(S,T)=>{const C=S.squaredDifference(u,p);return T([u,p]),C},g={a:u,b:p},I={};return H.runKernelFunc(y,g,null,Vf,I)}const VI=V({squaredDifference_:xZ});function TZ(r,l){const u=M(r,"x","squeeze");return re(u,o2(u.shape,l).newShape)}const GI=V({squeeze_:TZ});function AZ(r,l=0){const u=ng(r,"tensors","stack");if(J(u.length>=1,()=>"Pass at least one tensor to tf.stack"),u.length===1)return Ds(u[0],l);const p=u[0].rank,y=u[0].shape,g=u[0].dtype;J(l<=p,()=>"Axis must be <= rank of the tensor"),u.forEach(S=>{tn(y,S.shape,"All tensors passed to stack must have matching shapes"),J(g===S.dtype,()=>"All tensors passed to stack must have matching dtypes")});const I=u.map(S=>Ds(S,l));return Tn(I,l)}const Ys=V({stack_:AZ});function vZ(r,l=0){const u=M(r,"x","step"),p={x:u},y={alpha:l};return H.runKernelFunc(g=>g.step(u,l),p,null,Xf,y)}const ca=V({step_:vZ});function la(r,l,u){if(Oc(r),l!=null&&l.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const p=br(r,u);if(p.length!==2&&p.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Pi(r,l,p,u)}function NZ(r,l,u){const p=M(r,"x","unsortedSegmentSum"),y=M(l,"segmentIds","unsortedSegmentSum","int32");J(nn(u),()=>"numSegments must be of dtype int");const g={x:p,segmentIds:y},I={numSegments:u},S=(T,C)=>{const D=T.unsortedSegmentSum(p,y,u);return C([y]),D};return H.runKernelFunc(S,g,null,jf,I)}const YI=V({unsortedSegmentSum_:NZ});function CZ(r,l=0){const u=M(r,"x","unstack");J(l>=-u.shape.length&&l`Axis = ${l} is not in [-${u.shape.length}, ${u.shape.length})`),l<0&&(l+=u.shape.length);const p={value:u},y={axis:l},g=I=>I.unstack(u,l);return H.runKernelFunc(g,p,null,qf,y)}const $u=V({unstack_:CZ});function RZ(r,l="euclidean",u=null,p=!1){r=M(r,"x","norm");const y=LO(r,l,u);let g=y.shape;if(p){const I=ft(u,r.shape);g=rs(y.shape,I)}return re(y,g)}function LO(r,l,u=null){if(r.rank===0)return zn(r);if(r.rank!==1&&u===null)return LO(re(r,[-1]),l,u);if(r.rank===1||typeof 
u=="number"||Array.isArray(u)&&u.length===1){if(l===1)return _e(zn(r),u);if(l===Infinity)return ra(zn(r),u);if(l===-Infinity)return lg(zn(r),u);if(l==="euclidean"||l===2)return fs(_e(aa(zn(r),Fe(2,"int32")),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}if(Array.isArray(u)&&u.length===2){if(l===1)return ra(_e(zn(r),u[0]),u[1]-1);if(l===Infinity)return ra(_e(zn(r),u[1]),u[0]);if(l===-Infinity)return lg(_e(zn(r),u[1]),u[0]);if(l==="fro"||l==="euclidean")return fs(_e(gt(r),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}throw new Error(`Error in norm: invalid axis: ${u}`)}const ug=V({norm_:RZ});function SO(r){return Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2))))}function dg(r,l,u){const p=1-r%2,y=new Float32Array(r);for(let g=0;g`Error in conv2dDerFilter: input must be rank 4, but got shape ${S.shape}.`),J(T.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${T.shape}.`),J(u.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${u}.`);const C=g==="NHWC"?S.shape[3]:S.shape[1],D=g==="NHWC"?T.shape[3]:T.shape[1];J(C===u[2],()=>`Error in conv2dDerFilter: depth of input ${C}) must match input depth in filter (${u[2]}.`),J(D===u[3],()=>`Error in conv2dDerFilter: depth of dy (${D}) must match output depth for filter (${u[3]}).`),I!=null&&J(nn(y),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${I} but got pad ${y}.`);const _=ne=>{const te=1,P=Uc(g),ge=mi(S.shape,u,p,te,y,I,!1,P);return ne.conv2dDerFilter(S,T,ge)},A={x:S,dy:T},B={strides:p,pad:y,dataFormat:g,dimRoundingMode:I,filterShape:u};return H.runKernelFunc(_,A,null,F2,B)}const pg=V({conv2DBackpropFilter_:OZ});function EZ(r,l,u,p,y,g=[1,1],I){let S=r;r.rank===3&&(S=re(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let T=l;T.rank===3&&(T=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const C=A=>{const B=mi(r.shape,u,p,g,y,I,!0);return A.depthwiseConv2DDerFilter(S,T,B)},D={x:S,dy:T},_={strides:p,pad:y,dimRoundingMode:I,dilations:g,filterShape:u};return H.runKernelFunc(C,D,null,M2,_)}const IO=V({depthwiseConv2dNativeBackpropFilter_:EZ});function DZ(r,l,u,p,y,g=[1,1],I){let S=l,T=!1;l.rank===3&&(T=!0,S=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const C=B=>{const ne=mi(r,u.shape,p,g,y,I,!0);return B.depthwiseConv2DDerInput(S,u,ne)},D={dy:S,filter:u},_={strides:p,pad:y,dimRoundingMode:I,dilations:g,inputShape:r},A=H.runKernelFunc(C,D,null,P2,_);return T?re(A,[A.shape[1],A.shape[2],A.shape[3]]):A}const xO=V({depthwiseConv2dNativeBackpropInput_:DZ});function kZ(r){return dg(r,.54,.46)}const TO=V({hammingWindow_:kZ});function FZ(r){return dg(r,.5,.5)}const mg=V({hannWindow_:FZ});function _Z(r,l,u,p=!1,y=0){let g=0;const I=[];for(;g+l<=r.size;)I.push(Tt(r,g,l)),g+=u;if(p)for(;g`Error in cropAndResize: image must be rank 4,but got rank ${I.rank}.`),J(S.rank===2&&S.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${C},4] but had shape ${S.shape}.`),J(T.rank===1&&T.shape[0]===C,()=>`Error in cropAndResize: boxInd must be have size [${C}] but had shape ${S.shape}.`),J(p.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${p.length}.`),J(p[0]>=1&&p[1]>=1,()=>`cropSize must be atleast [1,1], but was ${p}`),J(y==="bilinear"||y==="nearest",()=>`method must be bilinear or nearest, but was ${y}`);const D=ne=>ne.cropAndResize(I,S,T,p,y,g),_={image:I,boxes:S,boxInd:T},A={method:y,extrapolationValue:g,cropSize:p},B=H.runKernelFunc(D,_,null,U2,A);return B}const vO=V({cropAndResize_:$Z});function 
UZ(r){const l=M(r,"image","flipLeftRight","float32");J(l.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${l.rank}.`);const u={image:l},p=H.runKernel(Z2,u,{});return p}const NO=V({flipLeftRight_:UZ});function BZ(r,l,u=0,p=.5){const y=M(r,"image","rotateWithOffset","float32");J(y.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${y.rank}.`);const g={image:y},I={radians:l,fillValue:u,center:p},S=H.runKernel(PR,g,I);return S}const CO=V({rotateWithOffset_:BZ});function Hs(r,l,u,p,y,g){p==null&&(p=.5),y==null&&(y=Number.NEGATIVE_INFINITY),g==null&&(g=0);const I=r.shape[0];return u=Math.min(u,I),J(0<=p&&p<=1,()=>`iouThreshold must be in [0, 1], but was '${p}'`),J(r.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${r.rank}'`),J(r.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`),J(l.rank===1,()=>"scores must be a 1D tensor"),J(l.shape[0]===I,()=>`scores has incompatible shape with boxes. Expected ${I}, but was ${l.shape[0]}`),J(0<=g&&g<=1,()=>`softNmsSigma must be in [0, 1], but was '${g}'`),{maxOutputSize:u,iouThreshold:p,scoreThreshold:y,softNmsSigma:g}}function MZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppression"),I=M(l,"scores","nonMaxSuppression"),S=Hs(g,I,u,p,y);u=S.maxOutputSize,p=S.iouThreshold,y=S.scoreThreshold;const T={maxOutputSize:u,iouThreshold:p,scoreThreshold:y};return H.runKernelFunc(C=>C.nonMaxSuppression(g,I,u,p,y),{boxes:g,scores:I},null,SR,T)}const RO=V({nonMaxSuppression_:MZ});function OO(r,l,u){const p=PZ(r,l,u),y=p<0?-(p+1):p;r.splice(y,0,l)}function PZ(r,l,u){return VZ(r,l,u||zZ)}function zZ(r,l){return r>l?1:r>>1);const S=u(l,r[g]);S>0?p=g+1:(y=g,I=!S)}return I?p:-p-1}function EO(r,l,u,p,y){return HI(r,l,u,p,y,0).selectedIndices}function DO(r,l,u,p,y,g){return HI(r,l,u,p,y,0,!1,g,!0)}function kO(r,l,u,p,y,g){return HI(r,l,u,p,y,g,!0)}function HI(r,l,u,p,y,g,I=!1,S=!1,T=!1){const C=[];for(let P=0;Py&&C.push({score:l[P],boxIndex:P,suppressBeginIndex:0});C.sort(FO);const D=g>0?-.5/g:0,_=[],A=[];for(;_.length0;){const P=C.pop(),{score:ge,boxIndex:ae,suppressBeginIndex:Le}=P;if(ge=Le;--Ve){const at=GZ(r,ae,_[Ve]);if(at>=p){ve=!0;break}if(P.score=P.score*YZ(p,D,at),P.score<=y)break}P.suppressBeginIndex=_.length,ve||(P.score===ge?(_.push(ae),A.push(P.score)):P.score>y&&OO(C,P,FO))}const B=_.length,ne=u-B;S&&ne>0&&(_.push(...new Array(ne).fill(0)),A.push(...new Array(ne).fill(0)));const te={selectedIndices:co(_,"int32")};return I&&(te.selectedScores=co(A,"float32")),T&&(te.validOutputs=Fe(B,"int32")),te}function GZ(r,l,u){const p=r.subarray(l*4,l*4+4),y=r.subarray(u*4,u*4+4),g=Math.min(p[0],p[2]),I=Math.min(p[1],p[3]),S=Math.max(p[0],p[2]),T=Math.max(p[1],p[3]),C=Math.min(y[0],y[2]),D=Math.min(y[1],y[3]),_=Math.max(y[0],y[2]),A=Math.max(y[1],y[3]),B=(S-g)*(T-I),ne=(_-C)*(A-D);if(B<=0||ne<=0)return 0;const te=Math.max(g,C),P=Math.max(I,D),ge=Math.min(S,_),ae=Math.min(T,A),Le=Math.max(ge-te,0)*Math.max(ae-P,0);return Le/(B+ne-Le)}function YZ(r,l,u){const p=Math.exp(l*u*u);return u<=r?p:0}function FO(r,l){return r.score-l.score||r.score===l.score&&l.boxIndex-r.boxIndex}async function HZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppressionAsync"),I=M(l,"scores","nonMaxSuppressionAsync"),S=Hs(g,I,u,p,y);u=S.maxOutputSize,p=S.iouThreshold,y=S.scoreThreshold;const T=await Promise.all([g.data(),I.data()]),C=T[0],D=T[1],_=EO(C,D,u,p,y);return g!==r&&g.dispose(),I!==l&&I.dispose(),_}const _O=HZ;function 
qZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=0){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),T=Hs(I,S,u,p,y,g);u=T.maxOutputSize,p=T.iouThreshold,y=T.scoreThreshold,g=T.softNmsSigma;const C={boxes:I,scores:S},D={maxOutputSize:u,iouThreshold:p,scoreThreshold:y,softNmsSigma:g},_=H.runKernel(xR,C,D);return{selectedIndices:_[0],selectedScores:_[1]}}const WO=V({nonMaxSuppressionWithScore_:qZ});async function jZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=0){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),T=Hs(I,S,u,p,y,g);u=T.maxOutputSize,p=T.iouThreshold,y=T.scoreThreshold,g=T.softNmsSigma;const C=await Promise.all([I.data(),S.data()]),D=C[0],_=C[1],A=kO(D,_,u,p,y,g);return I!==r&&I.dispose(),S!==l&&S.dispose(),A}const $O=jZ;function KZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=!1){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),T=Hs(I,S,u,p,y,null),C=T.maxOutputSize,D=T.iouThreshold,_=T.scoreThreshold,A={boxes:I,scores:S},B={maxOutputSize:C,iouThreshold:D,scoreThreshold:_,padToMaxOutputSize:g},ne=H.runKernel(IR,A,B);return{selectedIndices:ne[0],validOutputs:ne[1]}}const UO=V({nonMaxSuppressionPadded_:KZ});async function XZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=!1){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),T=Hs(I,S,u,p,y,null),C=T.maxOutputSize,D=T.iouThreshold,_=T.scoreThreshold,[A,B]=await Promise.all([I.data(),S.data()]),ne=DO(A,B,C,D,_,g);return I!==r&&I.dispose(),S!==l&&S.dispose(),ne}const BO=XZ;function JZ(r,l,u=!1){const p=M(r,"images","resizeBilinear");J(p.rank===3||p.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${p.rank}.`),J(l.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${l}.`);let y=p,g=!1;p.rank===3&&(g=!0,y=re(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,T=(A,B)=>(B([y]),A.resizeBilinear(y,I,S,u)),C={images:y},D={alignCorners:u,size:l},_=H.runKernelFunc(T,C,null,Ef,D);return g?re(_,[_.shape[1],_.shape[2],_.shape[3]]):_}const MO=V({resizeBilinear_:JZ});function ZZ(r,l,u=!1){const p=M(r,"images","resizeNearestNeighbor");J(p.rank===3||p.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${p.rank}.`),J(l.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${l}.`),J(p.dtype==="float32"||p.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let y=p,g=!1;p.rank===3&&(g=!0,y=re(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,T={images:y},C={alignCorners:u,size:l},D=(A,B)=>(B([y]),A.resizeNearestNeighbor(y,I,S,u)),_=H.runKernelFunc(D,T,null,Of,C);return g?re(_,[_.shape[1],_.shape[2],_.shape[3]]):_}const PO=V({resizeNearestNeighbor_:ZZ});function QZ(r,l,u){J(l%1===0,()=>`bandPart(): numLower must be an integer, got ${l}.`),J(u%1===0,()=>`bandPart(): numUpper must be an integer, got ${u}.`);const p=M(r,"a","bandPart");J(p.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${p.rank}.`);const y=p.shape,[g,I]=p.shape.slice(-2);if(!(l<=g))throw new Error(`bandPart(): numLower (${l}) must not be greater than the number of rows (${g}).`);if(!(u<=I))throw new Error(`bandPart(): numUpper (${u}) must not be greater than the number of columns (${I}).`);l<0&&(l=g),u<0&&(u=I);const S=re(hg(0,g,1,"int32"),[-1,1]),T=hg(0,I,1,"int32"),C=Be(S,T),D=oa(Lr(C,Fe(+l,"int32")),wr(C,Fe(-u,"int32"))),_=ks([g,I],p.dtype);return re(Ys($u(re(p,[-1,g,I])).map(A=>Vn(D,A,_))),y)}const zO=V({bandPart_:QZ});function eQ(r){let 
l;if(Array.isArray(r)){l=!1,J(r!=null&&r.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const y=r[0].shape[0];for(let g=1;g`Gram-Schmidt: Non-unique lengths found in the input vectors: (${r[g].shape[0]} vs. ${y})`)}else l=!0,r=lo(r,r.shape[0],0).map(y=>GI(y,[0]));J(r.length<=r[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);const u=[],p=r;for(let y=0;y{let g=p[y];if(y>0)for(let I=0;I=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`),r.rank===2)return GO(r,l);{const u=r.shape.slice(0,r.shape.length-2).reduce((T,C)=>T*C),p=$u(re(r,[u,r.shape[r.shape.length-2],r.shape[r.shape.length-1]]),0),y=[],g=[];p.forEach(T=>{const[C,D]=GO(T,l);y.push(C),g.push(D)});const I=re(Ys(y,0),r.shape),S=re(Ys(g,0),r.shape);return[I,S]}}function GO(r,l=!1){return H.tidy(()=>{J(r.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);const u=r.shape[0],p=r.shape[1];let y=AI(u),g=pi(r);const I=la([[1]],[1,1]);let S=pi(I);const T=u>=p?p:u;for(let C=0;C{const B=Tt(g,[C,C],[u-C,1]),ne=ug(B),te=Tt(g,[C,C],[1,1]),P=Vn(fi(te,0),la([[-1]]),la([[1]])),ge=Be(te,le(P,ne)),ae=Me(B,ge);ae.shape[0]===1?S=pi(I):S=Tn([I,Tt(ae,[1,0],[ae.shape[0]-1,ae.shape[1]])],0);const Le=It(Me(yn(P,ge),ne)),ve=Tt(g,[C,0],[u-C,p]),Ve=le(Le,S),at=xn(S);if(C===0)g=Be(ve,yn(Ve,yn(at,ve)));else{const Vt=Be(ve,yn(Ve,yn(at,ve)));g=Tn([Tt(g,[0,0],[C,p]),Vt],0)}const pt=xn(Ve),$t=Tt(y,[0,C],[u,y.shape[1]-C]);if(C===0)y=Be($t,yn(yn($t,S),pt));else{const Vt=Be($t,yn(yn($t,S),pt));y=Tn([Tt(y,[0,0],[u,C]),Vt],1)}return[S,g,y]}),uO([D,_,A])}return!l&&u>p&&(y=Tt(y,[0,0],[u,p]),g=Tt(g,[0,0],[p,p])),[y,g]})}const YO=V({qr_:tQ});var Qt;(function(r){r[r.NONE=0]="NONE",r[r.MEAN=1]="MEAN",r[r.SUM=2]="SUM",r[r.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(Qt||(Qt={}));function nQ(r,l,u=Qt.SUM_BY_NONZERO_WEIGHTS){const p=M(r,"losses","computeWeightedLoss");let y=null;l!=null&&(y=M(l,"weights","computeWeightedLoss"));const g=y==null?p:le(p,y);if(u===Qt.NONE)return g;if(u===Qt.SUM)return _e(g);if(u===Qt.MEAN){if(y==null)return FI(g);{const I=p.size/y.size,S=Me(_e(g),_e(y));return I>1?Me(S,Fe(I)):S}}if(u===Qt.SUM_BY_NONZERO_WEIGHTS){if(y==null)return Me(_e(g),Fe(p.size));{const I=le(y,Vi(p.shape)),S=Ie(_e(WI(I,Fe(0))),"float32");return Me(_e(g),S)}}throw Error(`Unknown reduction: ${u}`)}const Dn=V({computeWeightedLoss_:nQ});function sQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){const y=M(r,"labels","absoluteDifference"),g=M(l,"predictions","absoluteDifference");let I=null;u!=null&&(I=M(u,"weights","absoluteDifference")),tn(y.shape,g.shape,"Error in absoluteDifference: ");const S=zn(Be(y,g));return Dn(S,I,p)}const HO=V({absoluteDifference_:sQ});function iQ(r,l,u,p,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","cosineDistance"),I=M(l,"predictions","cosineDistance");let S=null;p!=null&&(S=M(p,"weights","cosineDistance")),tn(g.shape,I.shape,"Error in cosineDistance: ");const T=Fe(1),C=Be(T,_e(le(g,I),u,!0));return Dn(C,S,y)}const qO=V({cosineDistance_:iQ});function rQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){let y=M(r,"labels","hingeLoss");const g=M(l,"predictions","hingeLoss");let I=null;u!=null&&(I=M(u,"weights","hingeLoss")),tn(y.shape,g.shape,"Error in hingeLoss: ");const S=Fe(1);y=Be(le(Fe(2),y),S);const T=Fu(Be(S,le(y,g)));return Dn(T,I,p)}const jO=V({hingeLoss_:rQ});function oQ(r,l,u,p=1,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","huberLoss"),I=M(l,"predictions","huberLoss");let 
S=null;u!=null&&(S=M(u,"weights","huberLoss")),tn(g.shape,I.shape,"Error in huberLoss: ");const T=Fe(p),C=zn(Be(I,g)),D=_I(C,T),_=Be(C,D),A=vt(le(Fe(.5),gt(D)),le(T,_));return Dn(A,S,y)}const KO=V({huberLoss_:oQ});function aQ(r,l,u,p=1e-7,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","logLoss"),I=M(l,"predictions","logLoss");let S=null;u!=null&&(S=M(u,"weights","logLoss")),tn(g.shape,I.shape,"Error in logLoss: ");const T=Fe(1),C=Fe(p),D=It(le(g,ao(vt(I,C)))),_=le(Be(T,g),ao(vt(Be(T,I),C))),A=Be(D,_);return Dn(A,S,y)}const XO=V({logLoss_:aQ});function cQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){const y=M(r,"labels","meanSquaredError"),g=M(l,"predictions","meanSquaredError");let I=null;u!=null&&(I=M(u,"weights","meanSquaredError")),tn(y.shape,g.shape,"Error in meanSquaredError: ");const S=VI(y,g);return Dn(S,I,p)}const JO=V({meanSquaredError_:cQ});function lQ(r,l){const u=M(r,"labels","sigmoidCrossEntropyWithLogits"),p=M(l,"logits","sigmoidCrossEntropyWithLogits");tn(u.shape,p.shape,"Error in sigmoidCrossEntropyWithLogits: ");const y=Fu(p),g=le(p,u),I=EI(Gn(It(zn(p))));return vt(Be(y,g),I)}function hQ(r,l,u,p=0,y=Qt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"multiClassLabels","sigmoidCrossEntropy");const I=M(l,"logits","sigmoidCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","sigmoidCrossEntropy")),tn(g.shape,I.shape,"Error in sigmoidCrossEntropy: "),p>0){const C=Fe(p),D=Fe(1),_=Fe(.5);g=vt(le(g,Be(D,C)),le(_,C))}const T=lQ(g,I);return Dn(T,S,y)}const ZO=V({sigmoidCrossEntropy_:hQ});function uQ(r,l,u=-1){if(u===-1&&(u=l.rank-1),u!==l.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${l.rank} and dim was ${u}`);const p=cg((y,g,I)=>{const S=!0,T=DI(g,[u],S),C=Be(Ie(g,"float32"),T);I([y,C]);const D=It(le(C,y)),_=_e(D,[u]),A=(B,ne)=>{const[te,P]=ne,ge=rs(B.shape,[u]);return[le(re(B,ge),Be(Ie(te,"float32"),Gn(P))),le(re(B,ge),Be(Gn(P),Ie(te,"float32")))]};return{value:_,gradFunc:A}});return p(r,l)}function dQ(r,l,u,p=0,y=Qt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"onehotLabels","softmaxCrossEntropy");const I=M(l,"logits","softmaxCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","softmaxCrossEntropy")),tn(g.shape,I.shape,"Error in softmaxCrossEntropy: "),p>0){const C=Fe(p),D=Fe(1),_=Fe(g.shape[1]);g=vt(le(g,Be(D,C)),Me(C,_))}const T=uQ(g,I);return Dn(T,S,y)}const QO=V({softmaxCrossEntropy_:dQ});const xve={fft:_u,ifft:Pc,rfft:Wu,irfft:zI},Cve={hammingWindow:TO,hannWindow:mg,frame:fg,stft:AO},e1={flipLeftRight:NO,resizeNearestNeighbor:PO,resizeBilinear:MO,rotateWithOffset:CO,cropAndResize:vO,nonMaxSuppression:RO,nonMaxSuppressionAsync:_O,nonMaxSuppressionWithScore:WO,nonMaxSuppressionWithScoreAsync:$O,nonMaxSuppressionPadded:UO,nonMaxSuppressionPaddedAsync:BO},Vve={bandPart:zO,gramSchmidt:VO,qr:YO},Qve={absoluteDifference:HO,computeWeightedLoss:Dn,cosineDistance:qO,hingeLoss:jO,huberLoss:KO,logLoss:XO,meanSquaredError:JO,sigmoidCrossEntropy:ZO,softmaxCrossEntropy:QO};const t1=1.7580993408473768,n1=1.0507009873554805;const s1={kernelName:ef,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,ca(Ie(u,"float32"),-1))}}};const i1={kernelName:y2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=gt(Ie(u,"float32")),y=fs(Be(Fe(1),p));return It(Me(r,y))}}}};const r1={kernelName:b2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fs(Be(gt(Ie(u,"float32")),1));return Me(r,p)}}}};const o1={kernelName:Dc,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{let S=r;const 
T=Ot(u.shape,y);return T.length>0&&(S=_e(S,T)),re(S,u.shape)},I=()=>{let S=r;const T=Ot(p.shape,y);return T.length>0&&(S=_e(S,T)),re(S,p.shape)};return{a:g,b:I}}};const a1={kernelName:w2,saveAllInputs:!0,gradFunc:(r,l)=>{const u={};return l.forEach((p,y)=>{u[y]=()=>r.clone()}),u}};const c1={kernelName:L2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Ke(u)}}};const l1={kernelName:S2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Ke(u)}}};const h1={kernelName:I2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,fs(Be(Fe(1),gt(Ie(u,"float32")))))}}};const u1={kernelName:x2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fs(vt(Fe(1),gt(Ie(u,"float32"))));return Me(r,p)}}}};const d1={kernelName:v2,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=vt(gt(u),gt(p));let T=le(r,Me(p,S));const C=Ot(u.shape,y);return C.length>0&&(T=_e(T,C)),re(T,u.shape)},I=()=>{const S=vt(gt(u),gt(p));let T=It(le(r,Me(u,S)));const C=Ot(p.shape,y);return C.length>0&&(T=_e(T,C)),re(T,p.shape)};return{a:g,b:I}}};const p1={kernelName:T2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,vt(gt(Ie(u,"float32")),1))}}};const m1={kernelName:A2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,Be(Fe(1),gt(Ie(u,"float32"))))}}};function pQ(r,l,u,p,y=[1,1,1],g,I){const S=M(r,"dy","avgPool3dBackprop"),T=M(l,"input","avgPool3dBackprop");let C=S,D=T,_=!1;T.rank===4&&(_=!0,C=re(S,[1,S.shape[0],S.shape[1],S.shape[2],S.shape[3]]),D=re(T,[1,T.shape[0],T.shape[1],T.shape[2],T.shape[3]])),J(C.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),J(D.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${D.rank}.`),J(oo(p,y),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. 
Got strides ${p} and dilations '${y}'`),I!=null&&J(nn(g),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const A=P=>{const ge=og(D.shape,u,p,y,g,I);return P.avgPool3dBackprop(C,D,ge)},B={dy:C,input:D},ne={filterSize:u,strides:p,dilations:y,pad:g,dimRoundingMode:I},te=H.runKernelFunc(A,B,null,O2,ne);return _?re(te,[te.shape[1],te.shape[2],te.shape[3],te.shape[4]]):te}const f1=V({avgPool3dBackprop_:pQ});const g1={kernelName:R2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:y,strides:g,dilations:I,pad:S,dimRoundingMode:T}=u,C=I==null?[1,1,1]:I;return{x:()=>f1(r,p,y,g,C,S,T)}}};function mQ(r,l,u,p,y){const g=M(r,"dy","avgPoolBackprop"),I=M(l,"input","avgPoolBackprop");J(I.rank===g.rank,()=>`Rank of input (${I.rank}) does not match rank of dy (${g.rank})`);let S=I,T=g,C=!1;I.rank===3&&(C=!0,S=re(I,[1,I.shape[0],I.shape[1],I.shape[2]]),T=re(g,[1,g.shape[0],g.shape[1],g.shape[2]])),J(T.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${T.rank}.`),J(S.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${S.rank}.`);const D=ne=>{const te=rg(S.shape,u,p,1,y);return ne.avgPoolBackprop(T,S,te)},_={dy:T,input:S},A={filterSize:u,strides:p,pad:y},B=H.runKernelFunc(D,_,null,C2,A);return C?re(B,[B.shape[1],B.shape[2],B.shape[3]]):B}const y1=V({avgPoolBackprop_:mQ});const b1={kernelName:N2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:y,strides:g,pad:I}=u;return{x:()=>y1(r,p,y,g,I)}}};const w1={kernelName:tf,inputsToSave:["a","b"],gradFunc:(r,l,u)=>{const[p,y]=l,{transposeA:g,transposeB:I}=u;return!g&&!I?{a:()=>yn(r,y,!1,!0),b:()=>yn(p,r,!0,!1)}:!g&&I?{a:()=>yn(r,y,!1,!1),b:()=>yn(r,p,!0,!1)}:g&&!I?{a:()=>yn(y,r,!1,!0),b:()=>yn(p,r,!1,!1)}:{a:()=>yn(y,r,!0,!0),b:()=>yn(r,p,!0,!0)}}};const L1={kernelName:nf,gradFunc:(r,l,u)=>{const{blockShape:p,crops:y}=u;return{x:()=>UI(r,p,y)}}};const S1={kernelName:sf,gradFunc:(r,l,u)=>{const p=u,y=p.inputShape,g=p.shape,I=Array.from(g);for(let T=y.length-1;T>=0;T--)if(y[T]===g[T])I[T]=1;else if(y[T]!==1)throw new Error(`broadcastTo(): [${y}] cannot be broadcast to [${g}].`);const S=[];for(let T=0;T1&&S.push(T);return{x:()=>_e(r,S,!0)}}};const I1={kernelName:kc,gradFunc:r=>({x:()=>r.clone()})};const x1={kernelName:E2,gradFunc:r=>({x:()=>Ke(r)})};const T1={kernelName:D2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{clipValueMin:y,clipValueMax:g}=u;return{x:()=>Vn(oa(wr(p,y),Lr(p,g)),r,Ke(r))}}};const A1={kernelName:rf,saveAllInputs:!0,gradFunc:(r,l,u)=>{const p=l.map(T=>T.shape),{axis:y}=u,g=ft(y,l[0].shape)[0],I=p.map(T=>T[g]),S=lo(r,I,g);return S.map(T=>()=>T)}};const v1={kernelName:of,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,{dilations:g,strides:I,pad:S,dataFormat:T}=u;return J(ro(g),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${g}'`),{x:()=>gO(p.shape,r,y,I,S,T),filter:()=>pg(p,r,y.shape,I,S,T)}}};const N1={kernelName:af,inputsToSave:["dy","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,{strides:g,pad:I,dataFormat:S,dimRoundingMode:T}=u;return{dy:()=>SI(r,y,g,I,S,1,T),filter:()=>pg(r,p,y.shape,g,I,S,T)}}};function fQ(r,l,u,p,y){let g=r;r.rank===4&&(g=re(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]]));let I=l;I.rank===4&&(I=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),J(g.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${g.shape}.`),J(I.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${I.shape}.`),J(u.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${u}.`),J(g.shape[4]===u[3],()=>`Error in conv3dDerFilter: depth of input ${g.shape[4]}) must match input depth in filter (${u[3]}.`),J(I.shape[4]===u[4],()=>`Error in conv3dDerFilter: depth of dy (${I.shape[4]}) must match output depth for filter (${u[4]}).`);const S=D=>{const _=1,A=Eu(g.shape,u,p,_,y);return D.conv3dDerFilter(g,I,A)},T={x:g,dy:I},C={strides:p,pad:y,filterShape:u};return H.runKernelFunc(S,T,null,W2,C)}const C1=V({conv3DBackpropFilter_:fQ});const R1={kernelName:_2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:y,pad:g}=u;J(ro(p),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${p}'`);const[I,S]=l;return{x:()=>yO(I.shape,r,S,y,g),filter:()=>C1(I,r,S.shape,y,g)}}};const O1={kernelName:cf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(It(MI(Ie(u,"float32"))),r)}}};const E1={kernelName:lf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(PI(Ie(u,"float32")),r)}}};const D1={kernelName:hf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{axis:y,exclusive:g,reverse:I}=u;return{x:()=>{const S=zi([y],p.rank);let T=xI(r,y,g,!I);return S!=null&&(T=xn(T,S)),T}}}};const k1={kernelName:B2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:y,pad:g,dimRoundingMode:I}=u,S=p==null?[1,1]:p;J(ro(S),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${S}'`);const[T,C]=l;return J(T.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${T.rank}.`),J(C.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${C.rank}.`),J(T.shape[3]===C.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${T.shape[3]}) must match the inChannels dimension in filter ${C.shape[2]}.`),J(oo(y,S),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${y} and dilations '${S}'.`),I!=null&&J(nn(g),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`),{x:()=>xO(T.shape,r,C,y,g,p,I),filter:()=>IO(T,r,C.shape,y,g,p,I)}}};const F1={kernelName:z2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,g={x:p,filter:y,dy:r},I={x:p,filter:y,dy:r};return{x:()=>H.runKernel(V2,g,u),filter:()=>H.runKernel(G2,I,u)}}};const _1={kernelName:uf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Me(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{let S=le(r,Ie(u,"float32"));const T=Ot(p.shape,y);T.length>0&&(S=re(_e(S,T),p.shape));const C=gt(p);return It(Me(S,Ie(C,"float32")))};return{a:g,b:I}}};const W1={kernelName:Y2,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l,p=g=>g.eluDer(r,u),y={dy:r,y:u};return{x:()=>H.runKernelFunc(p,y,null,H2)}}};const $1={kernelName:q2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=le(Gn(It(gt(u))),2/Math.sqrt(Math.PI));return{x:()=>le(r,p)}}};const U1={kernelName:df,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,u)}}};const B1={kernelName:K2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,Gn(u))}}};const M1={kernelName:pf,gradFunc:r=>({x:()=>Ke(r)})};const P1={kernelName:mf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Me(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{let S=le(r,Ie(u,"float32"));const T=Ot(p.shape,y);T.length>0&&(S=re(_e(S,T),p.shape));const C=gt(p);return It(Me(S,Ie(C,"float32")))};return{a:g,b:I}}};const z1={kernelName:Q2,inputsToSave:["x","mean","variance","scale"],gradFunc:(r,l,u)=>{const{varianceEpsilon:p}=u,[y,g,I,S]=l,T=S==null?Fe(1):S,C=Ot(g.shape,y.shape),D=[];if(g.rank===1){for(let ve=0;veg.rank===1?re(le(le(r,ia(re(B,[1,1,1,g.shape[0]]),D)),T),y.shape):re(le(le(r,B),T),y.shape),P=()=>{let ve=le(le(B,Fe(-1)),A);return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)},ge=()=>{let ve=le(le(ne,_),A);return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)},ae=()=>{const ve=le(_,B);let Ve=le(r,ve);return g.rank===1&&(Ve=_e(Ve,C)),re(Ve,g.shape)},Le=()=>{let ve=r;return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)};return{x:te,mean:P,variance:ge,scale:ae,offset:Le}}};const Y1={kernelName:ff,inputsToSave:["x","indices"],gradFunc:(r,l,u)=>{const[p,y]=l,{axis:g}=u,I=ft(g,p.shape)[0],S=()=>{const T=p.shape,C=y.size,D=T.slice(0,I),_=D.length,A=T.slice(g,T.length).slice(1),B=A.length,ne=V1(0,_),te=V1(_+1,_+1+B),P=G1([D,[C],A]),ge=re(r,P),ae=re(y,[C]),Le=G1([[_],ne,te]),ve=xn(ge,Le);let Ve=YI(ve,ae,p.shape[I]);const at=Wc(Le);return Ve=xn(Ve,at),Ve};return{x:S,indices:()=>y}}};function V1(r,l){const u=[];for(let p=r;p{const[u,p]=l;return{a:()=>Ke(u),b:()=>Ke(p)}}};const q1={kernelName:yf,gradFunc:r=>({x:()=>Ie(r,"float32")})};const j1={kernelName:sR,gradFunc:r=>({x:()=>Ke(r)})};const K1={kernelName:iR,gradFunc:r=>({x:()=>Ke(r)})};const X1={kernelName:rR,gradFunc:r=>({x:()=>Ke(r)})};const J1={kernelName:wf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,vt(u,1))}}};const Z1={kernelName:bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,Ie(u,"float32"))}}};const Q1={kernelName:hR,inputsToSave:[],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{axis:y}=u;return{logits:()=>{const g=!0,I=Gn(p);return Be(r,le(_e(r,y,g),I))}}}};function gQ(r,l,u,p=5,y=1,g=1,I=.5){const 
S=D=>D.LRNGrad(u,r,l,p,y,g,I),T={x:r,y:l,dy:u},C={depthRadius:p,bias:y,alpha:g,beta:I};return H.runKernelFunc(S,T,null,dR,C)}const eE=V({localResponseNormalizationBackprop_:gQ});const tE={kernelName:uR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{depthRadius:g,bias:I,alpha:S,beta:T}=u;return{x:()=>eE(p,y,r,g,I,S,T)}}};function gg(r,l,u,p){return l.rank{const y=le(r,Ie(TI(u,l),r.dtype));return y}}}const qI={kernelName:Lf,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{reductionIndices:y}=p,g=l[0],I=l[1],S=ft(y,g.shape),T=gg(r,I,g,S);return{x:()=>T.x()}}};const nE={kernelName:Sf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=()=>le(r,Ie(wr(u,p),"float32")),g=()=>le(r,Ie(OI(u,p),"float32"));return{a:y,b:g}}};function yQ(r,l,u,p,y,g=[1,1,1],I,S){const T=M(r,"dy","maxPool3dBackprop"),C=M(l,"input","maxPool3dBackprop"),D=M(u,"output","maxPool3dBackprop");let _=T,A=C,B=D,ne=!1;C.rank===4&&(ne=!0,_=re(T,[1,T.shape[0],T.shape[1],T.shape[2],T.shape[3]]),A=re(C,[1,C.shape[0],C.shape[1],C.shape[2],C.shape[3]]),B=re(D,[1,D.shape[0],D.shape[1],D.shape[2],D.shape[3]])),J(_.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${_.rank}.`),J(A.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${A.rank}.`),J(B.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${B.rank}.`),J(oo(y,g),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${y} and dilations '${g}'`),S!=null&&J(nn(I),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${I}.`);const te=Le=>{const ve=og(A.shape,p,y,g,I,S);return Le.maxPool3dBackprop(_,A,B,ve)},P={dy:_,input:A,output:B},ge={filterSize:p,strides:y,dilations:g,pad:I,dimRoundingMode:S},ae=H.runKernelFunc(te,P,null,gR,ge);return ne?re(ae,[ae.shape[1],ae.shape[2],ae.shape[3],ae.shape[4]]):ae}const sE=V({maxPool3dBackprop_:yQ});const iE={kernelName:fR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{filterSize:g,strides:I,dilations:S,pad:T,dimRoundingMode:C}=u,D=S==null?[1,1,1]:S;return{x:()=>sE(r,p,y,g,I,D,T,C)}}};function bQ(r,l,u,p,y,g,I){const S=M(r,"dy","maxPoolBackprop"),T=M(l,"input","maxPoolBackprop"),C=M(u,"output","maxPoolBackprop");J(T.rank===S.rank,()=>`Rank of input (${T.rank}) does not match rank of dy (${S.rank})`),J(S.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${S.rank}.`),J(T.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${T.rank}.`),I!=null&&J(nn(g),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const D=B=>{const ne=rg(T.shape,p,y,1,g,I);return B.maxPoolBackprop(S,T,C,ne)},_={dy:S,input:T,output:C},A={filterSize:p,strides:y,pad:g,dimRoundingMode:I};return H.runKernelFunc(D,_,null,mR,A)}const rE=V({maxPoolBackprop_:bQ});const oE={kernelName:pR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{filterSize:g,strides:I,pad:S}=u;return{x:()=>rE(r,p,y,g,I,S)}}};const aE={kernelName:If,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{axis:y}=p,[g,I]=l,S=ft(y,g.shape),T=gg(r,I,g,S);return{x:()=>T.x()}}};const cE={kernelName:xf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=()=>le(r,Ie(Lr(u,p),"float32")),g=()=>le(r,Ie(fi(u,p),"float32"));return{a:y,b:g}}};const lE={kernelName:bR,inputsToSave:["x"],gradFunc:(r,l,u)=>{const 
p=l[0],{paddings:y}=u,g=y.map(I=>I[0]);return{x:()=>Tt(r,g,p.shape)}}};const hE={kernelName:wR,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Ot(u.shape,y);return S.length>0?re(_e(r,S),u.shape):r},I=()=>{const S=le(r,It(NI(Me(u,p)))),T=Ot(p.shape,y);return T.length>0?re(_e(S,T),p.shape):S};return{a:g,b:I}}};const uE={kernelName:Tf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=le(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{const S=le(r,Ie(u,"float32")),T=Ot(p.shape,y);return T.length>0?re(_e(S,T),p.shape):S};return{a:g,b:I}}};const dE={kernelName:Af,gradFunc:r=>({x:()=>It(r)})};const pE={kernelName:AR,inputsToSave:["indices"],gradFunc:(r,l)=>{const u=l[0];return{indices:()=>ks(u.shape,"float32")}}};const mE={kernelName:TR,gradFunc:r=>({x:()=>Ke(r)})};const jI={kernelName:vf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const p=l[0],{paddings:y}=u,g=y.map(I=>I[0]);return{x:()=>Tt(r,g,p.shape)}}};const fE={kernelName:Nf,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(r,l)=>{const[u,p,y]=l,g=u,I=p,S=ot(g.shape,I.shape),T=()=>{const D=Ie(I,"float32");let _=le(r,le(D,aa(g,Be(D,Fe(1)))));const A=Ot(g.shape,S);return A.length>0&&(_=_e(_,A)),re(_,g.shape)},C=()=>{const D=fi(g,0),_=Vn(D,ao(g),Ke(g));let A=le(r,le(y,_));const B=Ot(I.shape,S);return B.length>0&&(A=_e(A,B)),re(A,I.shape)};return{a:T,b:C}}};const gE={kernelName:vR,inputsToSave:["x","alpha"],gradFunc:(r,l)=>{const[u,p]=l,y=fi(u,0);return{x:()=>Vn(y,r,le(r,p)),alpha:()=>{let g=Vn(y,Ke(r),le(r,u));const I=Ot(p.shape,r.shape);return I.length>0&&(g=_e(g,I)),re(g,p.shape)}}}};const yE={kernelName:RR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,It(gt(u)))}}};const bE={kernelName:DR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=le(Lr(u,6),ca(u));return{x:()=>le(r,Ie(p,"float32"))}}};const wE={kernelName:Cf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,Ie(ca(u),"float32"))}}};const LE={kernelName:Rf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>re(r,u.shape)}}};const SE={kernelName:Ef,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,y=S=>{const{alignCorners:T}=u;return S.resizeBilinearBackprop(r,p,T)},g={images:p},I=()=>H.runKernelFunc(y,g,null,ER,u);return{images:I}}};const IE={kernelName:Of,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,y=S=>{const{alignCorners:T}=u;return S.resizeNearestNeighborBackprop(r,p,T)},g={images:p},I=()=>H.runKernelFunc(y,g,null,OR,u);return{images:I}}};const xE={kernelName:Df,gradFunc:(r,l,u)=>{const{dims:p}=u,y=ft(p,r.shape);return{x:()=>Mc(r,y)}}};const TE={kernelName:kR,gradFunc:r=>({x:()=>Ke(r)})};const AE={kernelName:kf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>It(Me(r,le(aa(u,1.5),2)))}}};const vE={kernelName:Ff,inputsToSave:["condition"],gradFunc:(r,l)=>{const[u]=l;return{condition:()=>Ie(Ke(u),"float32"),t:()=>le(r,Ie(u,r.dtype)),e:()=>le(r,Ie(kI(u),r.dtype))}}};const NE={kernelName:FR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fi(u,Fe(0)),y=Fe(t1),g=Fe(n1),I=le(r,g),S=le(le(r,y),Gn(Ie(u,"float32")));return Vn(p,I,S)}}}};const CE={kernelName:Uf,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,le(u,Be(Fe(1),u)))}}};const RE={kernelName:_R,gradFunc:r=>({x:()=>Ke(r)})};const OE={kernelName:Wf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(Du(Ie(u,"float32")),r)}}};const 
EE={kernelName:$f,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(II(Ie(u,"float32")),r)}}};const DE={kernelName:_f,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{begin:y,size:g}=u,I=p.shape,[S,T]=sg(p,y,g),C=[];for(let D=0;D$I(r,C)}}};const kE={kernelName:$R,outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{dim:y}=u,g=!0,I=le(r,p);return{logits:()=>Be(I,le(_e(I,[y],g),p))}}};const FE={kernelName:WR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,wI(u))}}};const KI={kernelName:Pf,gradFunc:(r,l,u)=>{const{blockShape:p,paddings:y}=u;return{x:()=>LI(r,p,y)}}};const XI={kernelName:zf,gradFunc:(r,l,u)=>{const{axis:p}=u;return{x:()=>Tn(r,p)}}};const _E={kernelName:Bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,le(fs(Ie(u,"float32")),2))}}};const WE={kernelName:UR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,le(Ie(u,"float32"),2))}}};const $E={kernelName:Vf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=Fe(2),g=()=>le(r,le(y,Be(u,p))),I=()=>le(r,le(y,Be(p,u)));return{a:g,b:I}}};const UE={kernelName:Xf,gradFunc:r=>({x:()=>Ke(r)})};const BE={kernelName:Gf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{let S=r;const T=Ot(u.shape,y);return T.length>0&&(S=_e(S,T)),re(S,u.shape)},I=()=>{let S=r;const T=Ot(p.shape,y);return T.length>0&&(S=_e(S,T)),re(It(S),p.shape)};return{a:g,b:I}}};const ME={kernelName:Mf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,y=p.shape.slice(),{axis:g}=u,I=ft(g,p.shape);I.forEach(C=>{y[C]=1});const S=re(r,y),T=le(S,Vi(p.shape,"float32"));return{x:()=>T}}};const PE={kernelName:BR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,gt(Du(u)))}}};const zE={kernelName:MR,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(Be(Fe(1),gt(u)),r)}}};const VE={kernelName:Yf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{reps:y}=u,g=()=>{let I=Ke(p);if(p.rank===1)for(let S=0;S{const p=u,{perm:y}=p,g=Wc(y);return{x:()=>xn(r,g)}}};const YE={kernelName:qf,gradFunc:(r,l,u)=>{const p=u,{axis:y}=p;return{value:()=>Ys(r,y)}}};const HE={kernelName:jf,inputsToSave:["segmentIds"],gradFunc:(r,l)=>{const[u]=l,p=()=>wQ(r,u);return{x:p}}};function wQ(r,l){const u=RI(l,Ke(l)),p=CI(r,u);let y=wr(l,Fe(0,"int32"));const g=p.rank-y.rank;for(let S=0;S({x:()=>Ke(r)})};const LQ=[s1,i1,r1,o1,a1,c1,l1,h1,u1,d1,p1,m1,g1,b1,w1,L1,S1,I1,x1,T1,A1,N1,v1,R1,O1,E1,D1,k1,F1,_1,W1,$1,U1,B1,P1,M1,z1,Y1,H1,q1,j1,K1,X1,J1,Z1,Q1,tE,qI,qI,nE,iE,oE,aE,cE,lE,hE,uE,dE,pE,mE,jI,jI,fE,gE,yE,bE,wE,LE,SE,IE,xE,TE,AE,vE,NE,CE,RE,OE,EE,DE,kE,FE,KI,KI,XI,XI,_E,$E,WE,UE,BE,ME,PE,zE,VE,GE,YE,HE,qE];for(const r of LQ)VR(r);function JI(r,l,u=!1){const{Image:p,Canvas:y}=St.getEnv();if(!(r instanceof p||r instanceof y))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");const g=ea(r),I=l/Math.max(g.height,g.width),S=I*g.width,T=I*g.height,C=Rc({width:l,height:l}),D=r instanceof y?r:Su(r),_=Math.abs(S-T)/2,A=u&&S{if(gr(u)){this._imageTensors[p]=u,this._inputDimensions[p]=u.shape;return}if(Rs(u)){const g=u.shape[0];if(g!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${g} passed, but not supported in input array`);this._imageTensors[p]=u,this._inputDimensions[p]=u.shape.slice(1);return}const y=u instanceof St.getEnv().Canvas?u:Su(u);this._canvases[p]=y,this._inputDimensions[p]=[y.height,y.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return 
this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return _i(this.batchSize,0,1).map((r,l)=>this.getReshapedInputDimensions(l))}getInput(r){return this.canvases[r]||this.imageTensors[r]}getInputDimensions(r){return this._inputDimensions[r]}getInputHeight(r){return this._inputDimensions[r][0]}getInputWidth(r){return this._inputDimensions[r][1]}getReshapedInputDimensions(r){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");const l=this.getInputWidth(r),u=this.getInputHeight(r);return _S({width:l,height:u},this.inputSize)}toBatchTensor(r,l=!0){return this._inputSize=r,hO(()=>{const u=_i(this.batchSize,0,1).map(y=>{const g=this.getInput(y);if(g instanceof En){let I=Rs(g)?g:g.expandDims();return I=BS(I,l),(I.shape[1]!==r||I.shape[2]!==r)&&(I=e1.resizeBilinear(I,[r,r])),I.as3D(r,r,3)}if(g instanceof St.getEnv().Canvas)return gI.fromPixels(JI(g,r,l));throw new Error(`toBatchTensor - at batchIdx ${y}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${g}`)}),p=Ys(u.map(y=>Ie(y,"float32"))).as4D(this.batchSize,r,r,3);return p})}}async function Wt(r){if(r instanceof ho)return r;let l=Array.isArray(r)?r:[r];if(!l.length)throw new Error("toNetInput - empty array passed as input");const u=y=>Array.isArray(r)?` at input index ${y}:`:"",p=l.map(Qo);return p.forEach((y,g)=>{if(!Xm(y)&&!gr(y)&&!Rs(y))throw typeof l[g]=="string"?new Error(`toNetInput -${u(g)} string passed, but could not resolve HTMLElement for element id ${l[g]}`):new Error(`toNetInput -${u(g)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(Rs(y)){const I=y.shape[0];if(I!==1)throw new Error(`toNetInput -${u(g)} tf.Tensor4D with batchSize ${I} passed, but not supported in input array`)}}),await Promise.all(p.map(y=>Xm(y)&&qS(y))),new ho(p,Array.isArray(r))}async function zc(r,l){const{Canvas:u}=St.getEnv();let p=r;if(!(r instanceof u)){const I=await Wt(r);if(I.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");const S=I.getInput(0);p=S instanceof u?S:await KS(S)}const y=is(p),g=l.map(I=>I instanceof Jt?I.forSize(p.width,p.height).box.floor():I).map(I=>I.clipAtImageBorders(p.width,p.height));return g.map(({x:I,y:S,width:T,height:C})=>{const D=Rc({width:T,height:C});return is(D).putImageData(y.getImageData(I,S,T,C),0,0),D})}const yg=Je(Ze());async function Vc(r,l){if(!gr(r)&&!Rs(r))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(Rs(r)&&r.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return yg.tidy(()=>{const[u,p,y]=r.shape.slice(Rs(r)?1:0),g=l.map(S=>S instanceof Jt?S.forSize(p,u).box:S).map(S=>S.clipAtImageBorders(p,u)),I=g.map(({x:S,y:T,width:C,height:D})=>yg.slice3d(r.as3D(u,p,y),[T,S,0],[D,C,y]));return I})}async function ha(r,l){const u=St.getEnv().fetch,p=await u(r,l);if(!(p.status<400))throw new Error(`failed to fetch: (${p.status}) ${p.statusText}, from url: ${p.url}`);return p}async function SQ(r){const l=await ha(r),u=await l.blob();if(!u.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${u.type}, for url: ${l.url}`);return jS(u)}async function ZI(r){return(await ha(r)).json()}async function IQ(r){return new 
Float32Array(await(await ha(r)).arrayBuffer())}function bg(r,l){const u=`${l}-weights_manifest.json`;if(!r)return{modelBaseUri:"",manifestUri:u};if(r==="/")return{modelBaseUri:"/",manifestUri:`/${u}`};const p=r.startsWith("http://")?"http://":r.startsWith("https://")?"https://":"";r=r.replace(p,"");const y=r.split("/").filter(S=>S),g=r.endsWith(".json")?y[y.length-1]:u;let I=p+(r.endsWith(".json")?y.slice(0,y.length-1):y).join("/");return I=r.startsWith("/")?`/${I}`:I,{modelBaseUri:I,manifestUri:I==="/"?`/${g}`:`${I}/${g}`}}const jE=Je(Ze());async function QI(r,l){const{manifestUri:u,modelBaseUri:p}=bg(r,l);let y=await ZI(u);return jE.io.loadWeights(y,p)}function xQ(r,l,u=!1){const{width:p,height:y}=u?ea(l):l;return r.width=p,r.height=y,{width:p,height:y}}const Sr=Je(Ze());class Wn{constructor(r){this._name=r;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(r){const{obj:l,objProp:u}=this.traversePropertyPath(r);return l[u]}reassignParamFromPath(r,l){const{obj:u,objProp:p}=this.traversePropertyPath(r);u[p].dispose(),u[p]=l}getParamList(){return this._paramMappings.map(({paramPath:r})=>({path:r,tensor:this.getParamFromPath(r)}))}getTrainableParams(){return this.getParamList().filter(r=>r.tensor instanceof Sr.Variable)}getFrozenParams(){return this.getParamList().filter(r=>!(r.tensor instanceof Sr.Variable))}variable(){this.getFrozenParams().forEach(({path:r,tensor:l})=>{this.reassignParamFromPath(r,l.variable())})}freeze(){this.getTrainableParams().forEach(({path:r,tensor:l})=>{const u=Sr.tensor(l.dataSync());l.dispose(),this.reassignParamFromPath(r,u)})}dispose(r=!0){this.getParamList().forEach(l=>{if(r&&l.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${l.path}`);l.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:r})=>Array.from(r.dataSync())).reduce((r,l)=>r.concat(l)))}async load(r){if(r instanceof Float32Array){this.extractWeights(r);return}await this.loadFromUri(r)}async loadFromUri(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);const l=await QI(r,this.getDefaultModelName());this.loadFromWeightMap(l)}async loadFromDisk(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);const{readFile:l}=St.getEnv(),{manifestUri:u,modelBaseUri:p}=bg(r,this.getDefaultModelName()),y=T=>Promise.all(T.map(C=>l(C).then(D=>D.buffer))),g=Sr.io.weightsLoaderFactory(y),I=JSON.parse((await l(u)).toString()),S=await g(I,p);this.loadFromWeightMap(S)}loadFromWeightMap(r){const{paramMappings:l,params:u}=this.extractParamsFromWeigthMap(r);this._paramMappings=l,this._params=u}extractWeights(r){const{paramMappings:l,params:u}=this.extractParams(r);this._paramMappings=l,this._params=u}traversePropertyPath(r){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");const l=r.split("/").reduce((y,g)=>{if(!y.nextObj.hasOwnProperty(g))throw new Error(`traversePropertyPath - object does not have property ${g}, for path ${r}`);return{obj:y.nextObj,objProp:g,nextObj:y.nextObj[g]}},{nextObj:this.params}),{obj:u,objProp:p}=l;if(!u||!p||!(u[p]instanceof Sr.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${r}`);return{obj:u,objProp:p}}}const Gc=Je(Ze());function os(r,l,u){return Gc.tidy(()=>{let 
p=Gc.separableConv2d(r,l.depthwise_filter,l.pointwise_filter,u,"same");return p=Gc.add(p,l.bias),p})}const Bt=Je(Ze());function wg(r,l,u=!1){return Bt.tidy(()=>{const p=Bt.relu(u?Bt.add(Bt.conv2d(r,l.conv0.filters,[2,2],"same"),l.conv0.bias):os(r,l.conv0,[2,2])),y=os(p,l.conv1,[1,1]),g=Bt.relu(Bt.add(p,y)),I=os(g,l.conv2,[1,1]);return Bt.relu(Bt.add(p,Bt.add(y,I)))})}function Uu(r,l,u=!1,p=!0){return Bt.tidy(()=>{const y=Bt.relu(u?Bt.add(Bt.conv2d(r,l.conv0.filters,p?[2,2]:[1,1],"same"),l.conv0.bias):os(r,l.conv0,p?[2,2]:[1,1])),g=os(y,l.conv1,[1,1]),I=Bt.relu(Bt.add(y,g)),S=os(I,l.conv2,[1,1]),T=Bt.relu(Bt.add(y,Bt.add(g,S))),C=os(T,l.conv3,[1,1]);return Bt.relu(Bt.add(y,Bt.add(g,Bt.add(S,C))))})}const uo=Je(Ze());function ua(r,l,u="same",p=!1){return uo.tidy(()=>{const y=uo.add(uo.conv2d(r,l.filters,[1,1],u),l.bias);return p?uo.relu(y):y})}function Yn(r,l){Object.keys(r).forEach(u=>{l.some(p=>p.originalPath===u)||r[u].dispose()})}const Lg=Je(Ze());function Yc(r,l){return function(u,p,y,g){const I=Lg.tensor4d(r(u*p*y*y),[y,y,u,p]),S=Lg.tensor1d(r(p));return l.push({paramPath:`${g}/filters`},{paramPath:`${g}/bias`}),{filters:I,bias:S}}}const Sg=Je(Ze());function Ig(r,l){return function(u,p,y){const g=Sg.tensor2d(r(u*p),[u,p]),I=Sg.tensor1d(r(p));return l.push({paramPath:`${y}/weights`},{paramPath:`${y}/bias`}),{weights:g,bias:I}}}class ex{constructor(r,l,u){this.depthwise_filter=r;this.pointwise_filter=l;this.bias=u}}const Bu=Je(Ze());function Hc(r,l){return function(u,p,y){const g=Bu.tensor4d(r(3*3*u),[3,3,u,1]),I=Bu.tensor4d(r(u*p),[1,1,u,p]),S=Bu.tensor1d(r(p));return l.push({paramPath:`${y}/depthwise_filter`},{paramPath:`${y}/pointwise_filter`},{paramPath:`${y}/bias`}),new ex(g,I,S)}}function qc(r){return function(l){const u=r(`${l}/depthwise_filter`,4),p=r(`${l}/pointwise_filter`,4),y=r(`${l}/bias`,1);return new ex(u,p,y)}}function gs(r,l){return function(u,p,y){const g=r[u];if(!jo(g,p))throw new Error(`expected weightMap[${u}] to be a Tensor${p}D, instead have ${g}`);return l.push({originalPath:u,paramPath:y||u}),g}}function Hn(r){let l=r;function u(y){const g=l.slice(0,y);return l=l.slice(y),g}function p(){return l}return{extractWeights:u,getRemainingWeights:p}}function xg(r,l){const u=Yc(r,l),p=Hc(r,l);function y(I,S,T,C=!1){const D=C?u(I,S,3,`${T}/conv0`):p(I,S,`${T}/conv0`),_=p(S,S,`${T}/conv1`),A=p(S,S,`${T}/conv2`);return{conv0:D,conv1:_,conv2:A}}function g(I,S,T,C=!1){const{conv0:D,conv1:_,conv2:A}=y(I,S,T,C),B=p(S,S,`${T}/conv3`);return{conv0:D,conv1:_,conv2:A,conv3:B}}return{extractDenseBlock3Params:y,extractDenseBlock4Params:g}}function KE(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractDenseBlock4Params:y}=xg(u,l),g=y(3,32,"dense0",!0),I=y(32,64,"dense1"),S=y(64,128,"dense2"),T=y(128,256,"dense3");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:g,dense1:I,dense2:S,dense3:T}}}function Tg(r){return function(l){const u=r(`${l}/filters`,4),p=r(`${l}/bias`,1);return{filters:u,bias:p}}}function Ag(r,l){const u=gs(r,l),p=Tg(u),y=qc(u);function g(S,T=!1){const C=T?p(`${S}/conv0`):y(`${S}/conv0`),D=y(`${S}/conv1`),_=y(`${S}/conv2`);return{conv0:C,conv1:D,conv2:_}}function I(S,T=!1){const C=T?p(`${S}/conv0`):y(`${S}/conv0`),D=y(`${S}/conv1`),_=y(`${S}/conv2`),A=y(`${S}/conv3`);return{conv0:C,conv1:D,conv2:_,conv3:A}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:I}}function XE(r){const 
l=[],{extractDenseBlock4Params:u}=Ag(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2"),dense3:u("dense3")};return Yn(r,l),{params:p,paramMappings:l}}const po=Je(Ze());class vg extends Wn{constructor(){super("FaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceFeatureExtractor - load model before inference");return po.tidy(()=>{const u=po.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(po.scalar(255));let g=Uu(y,l.dense0,!0);return g=Uu(g,l.dense1),g=Uu(g,l.dense2),g=Uu(g,l.dense3),g=po.avgPool(g,[7,7],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Wt(r))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(r){return XE(r)}extractParams(r){return KE(r)}}const jc=Je(Ze());function Mu(r,l){return jc.tidy(()=>jc.add(jc.matMul(r,l.weights),l.bias))}function JE(r,l,u){const p=[],{extractWeights:y,getRemainingWeights:g}=Hn(r),I=Ig(y,p),S=I(l,u,"fc");if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:p,params:{fc:S}}}function ZE(r){const l=[],u=gs(r,l);function p(g){const I=u(`${g}/weights`,2),S=u(`${g}/bias`,1);return{weights:I,bias:S}}const y={fc:p("fc")};return Yn(r,l),{params:y,paramMappings:l}}function Ng(r){const l={},u={};return Object.keys(r).forEach(p=>{const y=p.startsWith("fc")?u:l;y[p]=r[p]}),{featureExtractorMap:l,classifierMap:u}}const QE=Je(Ze());class Cg extends Wn{constructor(r,l){super(r);this._faceFeatureExtractor=l}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return QE.tidy(()=>{const u=r instanceof ho?this.faceFeatureExtractor.forwardInput(r):r;return Mu(u.as2D(u.shape[0],-1),l.fc)})}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return JE(r,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Ng(r);return this.faceFeatureExtractor.loadFromWeightMap(l),ZE(u)}extractParams(r){const l=this.getClassifierChannelsIn(),u=this.getClassifierChannelsOut(),p=u*l+u,y=r.slice(0,r.length-p),g=r.slice(r.length-p);return this.faceFeatureExtractor.extractWeights(y),this.extractClassifierParams(g)}}const tx=["neutral","happy","sad","angry","fearful","disgusted","surprised"];class da{constructor(r){if(r.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${r.length}`);tx.forEach((l,u)=>{this[l]=r[u]})}asSortedArray(){return tx.map(r=>({expression:r,probability:this[r]})).sort((r,l)=>l.probability-r.probability)}}const Kc=Je(Ze());class nx extends Cg{constructor(r=new vg){super("FaceExpressionNet",r)}forwardInput(r){return Kc.tidy(()=>Kc.softmax(this.runNet(r)))}async forward(r){return this.forwardInput(await Wt(r))}async predictExpressions(r){const l=await Wt(r),u=await this.forwardInput(l),p=await Promise.all(Kc.unstack(u).map(async g=>{const I=await g.data();return g.dispose(),I}));u.dispose();const y=p.map(g=>new da(g));return l.isBatchInput?y:y[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}}function sx(r){return r.expressions instanceof da}function Rg(r,l){const u={expressions:l};return 
Object.assign({},r,u)}function TQ(r,l,u=.1,p){const y=Array.isArray(l)?l:[l];y.forEach(g=>{const I=g instanceof da?g:sx(g)?g.expressions:void 0;if(!I)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");const S=I.asSortedArray(),T=S.filter(_=>_.probability>u),C=$i(g)?g.detection.box.bottomLeft:p||new Qe(0,0),D=new Cc(T.map(_=>`${_.expression} (${Ko(_.probability)})`),C);D.draw(r)})}function pa(r){return $i(r)&&r.landmarks instanceof Gs&&r.unshiftedLandmarks instanceof Gs&&r.alignedRect instanceof Jt}function Xc(r,l){const{box:u}=r.detection,p=l.shiftBy(u.x,u.y),y=p.align(),{imageDims:g}=r.detection,I=new Jt(r.detection.score,y.rescale(g.reverse()),g),S={landmarks:p,unshiftedLandmarks:l,alignedRect:I};return Object.assign({},r,S)}class eD{constructor(r={}){const{drawLines:l=!0,drawPoints:u=!0,lineWidth:p,lineColor:y,pointSize:g,pointColor:I}=r;this.drawLines=l,this.drawPoints=u,this.lineWidth=p||1,this.pointSize=g||2,this.lineColor=y||"rgba(0, 255, 255, 1)",this.pointColor=I||"rgba(255, 0, 255, 1)"}}class tD{constructor(r,l={}){this.faceLandmarks=r,this.options=new eD(l)}draw(r){const l=is(r),{drawLines:u,drawPoints:p,lineWidth:y,lineColor:g,pointSize:I,pointColor:S}=this.options;if(u&&this.faceLandmarks instanceof wu&&(l.strokeStyle=g,l.lineWidth=y,fr(l,this.faceLandmarks.getJawOutline()),fr(l,this.faceLandmarks.getLeftEyeBrow()),fr(l,this.faceLandmarks.getRightEyeBrow()),fr(l,this.faceLandmarks.getNose()),fr(l,this.faceLandmarks.getLeftEye(),!0),fr(l,this.faceLandmarks.getRightEye(),!0),fr(l,this.faceLandmarks.getMouth(),!0)),p){l.strokeStyle=S,l.fillStyle=S;const T=C=>{l.beginPath(),l.arc(C.x,C.y,I,0,2*Math.PI),l.fill()};this.faceLandmarks.positions.forEach(T)}}}function AQ(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const y=p instanceof Gs?p:pa(p)?p.landmarks:void 0;if(!y)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new tD(y).draw(r)})}const ix={};Pm(ix,{AnchorPosition:()=>Ui,DrawBox:()=>HS,DrawBoxOptions:()=>s2,DrawFaceLandmarks:()=>tD,DrawFaceLandmarksOptions:()=>eD,DrawTextField:()=>Cc,DrawTextFieldOptions:()=>jm,drawContour:()=>fr,drawDetections:()=>_J,drawFaceExpressions:()=>TQ,drawFaceLandmarks:()=>AQ});function vQ(r,l){const u=Yc(r,l),p=Hc(r,l);function y(I,S,T){const C=p(I,S,`${T}/separable_conv0`),D=p(S,S,`${T}/separable_conv1`),_=u(I,S,1,`${T}/expansion_conv`);return{separable_conv0:C,separable_conv1:D,expansion_conv:_}}function g(I,S){const T=p(I,I,`${S}/separable_conv0`),C=p(I,I,`${S}/separable_conv1`),D=p(I,I,`${S}/separable_conv2`);return{separable_conv0:T,separable_conv1:C,separable_conv2:D}}return{extractConvParams:u,extractSeparableConvParams:p,extractReductionBlockParams:y,extractMainBlockParams:g}}function nD(r,l){const u=[],{extractWeights:p,getRemainingWeights:y}=Hn(r),{extractConvParams:g,extractSeparableConvParams:I,extractReductionBlockParams:S,extractMainBlockParams:T}=vQ(p,u),C=g(3,32,3,"entry_flow/conv_in"),D=S(32,64,"entry_flow/reduction_block_0"),_=S(64,128,"entry_flow/reduction_block_1"),A={conv_in:C,reduction_block_0:D,reduction_block_1:_},B={};_i(l,0,1).forEach(ge=>{B[`main_block_${ge}`]=T(128,`middle_flow/main_block_${ge}`)});const ne=S(128,256,"exit_flow/reduction_block"),te=I(256,512,"exit_flow/separable_conv"),P={reduction_block:ne,separable_conv:te};if(y().length!==0)throw new Error(`weights remaing after extract: 
${y().length}`);return{paramMappings:u,params:{entry_flow:A,middle_flow:B,exit_flow:P}}}function NQ(r,l){const u=gs(r,l),p=Tg(u),y=qc(u);function g(S){const T=y(`${S}/separable_conv0`),C=y(`${S}/separable_conv1`),D=p(`${S}/expansion_conv`);return{separable_conv0:T,separable_conv1:C,expansion_conv:D}}function I(S){const T=y(`${S}/separable_conv0`),C=y(`${S}/separable_conv1`),D=y(`${S}/separable_conv2`);return{separable_conv0:T,separable_conv1:C,separable_conv2:D}}return{extractConvParams:p,extractSeparableConvParams:y,extractReductionBlockParams:g,extractMainBlockParams:I}}function sD(r,l){const u=[],{extractConvParams:p,extractSeparableConvParams:y,extractReductionBlockParams:g,extractMainBlockParams:I}=NQ(r,u),S=p("entry_flow/conv_in"),T=g("entry_flow/reduction_block_0"),C=g("entry_flow/reduction_block_1"),D={conv_in:S,reduction_block_0:T,reduction_block_1:C},_={};_i(l,0,1).forEach(te=>{_[`main_block_${te}`]=I(`middle_flow/main_block_${te}`)});const A=g("exit_flow/reduction_block"),B=y("exit_flow/separable_conv"),ne={reduction_block:A,separable_conv:B};return Yn(r,u),{params:{entry_flow:D,middle_flow:_,exit_flow:ne},paramMappings:u}}const on=Je(Ze());function iD(r,l,u){return on.add(on.conv2d(r,l.filters,u,"same"),l.bias)}function rx(r,l,u=!0){let p=u?on.relu(r):r;return p=os(p,l.separable_conv0,[1,1]),p=os(on.relu(p),l.separable_conv1,[1,1]),p=on.maxPool(p,[3,3],[2,2],"same"),p=on.add(p,iD(r,l.expansion_conv,[2,2])),p}function CQ(r,l){let u=os(on.relu(r),l.separable_conv0,[1,1]);return u=os(on.relu(u),l.separable_conv1,[1,1]),u=os(on.relu(u),l.separable_conv2,[1,1]),u=on.add(u,r),u}class rD extends Wn{constructor(r){super("TinyXception");this._numMainBlocks=r}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyXception - load model before inference");return on.tidy(()=>{const u=on.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(on.scalar(256));let g=on.relu(iD(y,l.entry_flow.conv_in,[2,2]));return g=rx(g,l.entry_flow.reduction_block_0,!1),g=rx(g,l.entry_flow.reduction_block_1),_i(this._numMainBlocks,0,1).forEach(I=>{g=CQ(g,l.middle_flow[`main_block_${I}`])}),g=rx(g,l.exit_flow.reduction_block),g=on.relu(os(g,l.exit_flow.separable_conv,[1,1])),g})}async forward(r){return this.forwardInput(await Wt(r))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(r){return sD(r,this._numMainBlocks)}extractParams(r){return nD(r,this._numMainBlocks)}}function oD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),y=Ig(u,l),g=y(512,1,"fc/age"),I=y(512,2,"fc/gender");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{fc:{age:g,gender:I}}}}function aD(r){const l=[],u=gs(r,l);function p(g){const I=u(`${g}/weights`,2),S=u(`${g}/bias`,1);return{weights:I,bias:S}}const y={fc:{age:p("fc/age"),gender:p("fc/gender")}};return Yn(r,l),{params:y,paramMappings:l}}var Ir;(function(r){r.FEMALE="female",r.MALE="male"})(Ir||(Ir={}));const Gi=Je(Ze());class ox extends Wn{constructor(r=new rD(2)){super("AgeGenderNet");this._faceFeatureExtractor=r}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return Gi.tidy(()=>{const u=r instanceof ho?this.faceFeatureExtractor.forwardInput(r):r,p=Gi.avgPool(u,[7,7],[2,2],"valid").as2D(u.shape[0],-1),y=Mu(p,l.fc.age).as1D(),g=Mu(p,l.fc.gender);return{age:y,gender:g}})}forwardInput(r){return 
Gi.tidy(()=>{const{age:l,gender:u}=this.runNet(r);return{age:l,gender:Gi.softmax(u)}})}async forward(r){return this.forwardInput(await Wt(r))}async predictAgeAndGender(r){const l=await Wt(r),u=await this.forwardInput(l),p=Gi.unstack(u.age),y=Gi.unstack(u.gender),g=p.map((S,T)=>({ageTensor:S,genderTensor:y[T]})),I=await Promise.all(g.map(async({ageTensor:S,genderTensor:T})=>{const C=(await S.data())[0],D=(await T.data())[0],_=D>.5,A=_?Ir.MALE:Ir.FEMALE,B=_?D:1-D;return S.dispose(),T.dispose(),{age:C,gender:A,genderProbability:B}}));return u.age.dispose(),u.gender.dispose(),l.isBatchInput?I:I[0]}getDefaultModelName(){return"age_gender_model"}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return oD(r)}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Ng(r);return this.faceFeatureExtractor.loadFromWeightMap(l),aD(u)}extractParams(r){const l=512*1+1+(512*2+2),u=r.slice(0,r.length-l),p=r.slice(r.length-l);return this.faceFeatureExtractor.extractWeights(u),this.extractClassifierParams(p)}}const ys=Je(Ze());class Og extends Cg{postProcess(r,l,u){const p=u.map(({width:g,height:I})=>{const S=l/Math.max(I,g);return{width:g*S,height:I*S}}),y=p.length;return ys.tidy(()=>{const g=(D,_)=>ys.stack([ys.fill([68],D,"float32"),ys.fill([68],_,"float32")],1).as2D(1,136).as1D(),I=(D,_)=>{const{width:A,height:B}=p[D];return _(A,B)?Math.abs(A-B)/2:0},S=D=>I(D,(_,A)=>_I(D,(_,A)=>A<_),C=r.mul(ys.fill([y,136],l,"float32")).sub(ys.stack(Array.from(Array(y),(D,_)=>g(S(_),T(_))))).div(ys.stack(Array.from(Array(y),(D,_)=>g(p[_].width,p[_].height))));return C})}forwardInput(r){return ys.tidy(()=>{const l=this.runNet(r);return this.postProcess(l,r.inputSize,r.inputDimensions.map(([u,p])=>({height:u,width:p})))})}async forward(r){return this.forwardInput(await Wt(r))}async detectLandmarks(r){const l=await Wt(r),u=ys.tidy(()=>ys.unstack(this.forwardInput(l))),p=await Promise.all(u.map(async(y,g)=>{const I=Array.from(await y.data()),S=I.filter((C,D)=>Vm(D)),T=I.filter((C,D)=>!Vm(D));return new wu(Array(68).fill(0).map((C,D)=>new Qe(S[D],T[D])),{height:l.getInputHeight(g),width:l.getInputWidth(g)})}));return u.forEach(y=>y.dispose()),l.isBatchInput?p:p[0]}getClassifierChannelsOut(){return 136}}class Pu extends Og{constructor(r=new vg){super("FaceLandmark68Net",r)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}}function cD(r){const l=[],{extractDenseBlock3Params:u}=Ag(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2")};return Yn(r,l),{params:p,paramMappings:l}}function lD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractDenseBlock3Params:y}=xg(u,l),g=y(3,32,"dense0",!0),I=y(32,64,"dense1"),S=y(64,128,"dense2");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:g,dense1:I,dense2:S}}}const mo=Je(Ze());class hD extends Wn{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyFaceFeatureExtractor - load model before inference");return mo.tidy(()=>{const u=mo.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(mo.scalar(255));let g=wg(y,l.dense0,!0);return g=wg(g,l.dense1),g=wg(g,l.dense2),g=mo.avgPool(g,[14,14],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await 
Wt(r))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(r){return cD(r)}extractParams(r){return lD(r)}}class ax extends Og{constructor(r=new hD){super("FaceLandmark68TinyNet",r)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}}class RQ extends Pu{}const Eg=Je(Ze());function uD(r,l){return Eg.add(Eg.mul(r,l.weights),l.biases)}const Jc=Je(Ze());function cx(r,l,u,p,y="same"){const{filters:g,bias:I}=l.conv;let S=Jc.conv2d(r,g,u,y);return S=Jc.add(S,I),S=uD(S,l.scale),p?Jc.relu(S):S}function dD(r,l){return cx(r,l,[1,1],!0)}function lx(r,l){return cx(r,l,[1,1],!1)}function Dg(r,l){return cx(r,l,[2,2],!0,"valid")}const bs=Je(Ze());function OQ(r,l){function u(S,T,C){const D=r(S),_=D.length/(T*C*C);if(FS(_))throw new Error(`depth has to be an integer: ${_}, weights.length: ${D.length}, numFilters: ${T}, filterSize: ${C}`);return bs.tidy(()=>bs.transpose(bs.tensor4d(D,[T,_,C,C]),[2,3,1,0]))}function p(S,T,C,D){const _=u(S,T,C),A=bs.tensor1d(r(T));return l.push({paramPath:`${D}/filters`},{paramPath:`${D}/bias`}),{filters:_,bias:A}}function y(S,T){const C=bs.tensor1d(r(S)),D=bs.tensor1d(r(S));return l.push({paramPath:`${T}/weights`},{paramPath:`${T}/biases`}),{weights:C,biases:D}}function g(S,T,C,D){const _=p(S,T,C,`${D}/conv`),A=y(T,`${D}/scale`);return{conv:_,scale:A}}function I(S,T,C,D,_=!1){const A=g((_?.5:1)*S,T,C,`${D}/conv1`),B=g(S,T,C,`${D}/conv2`);return{conv1:A,conv2:B}}return{extractConvLayerParams:g,extractResidualLayerParams:I}}function pD(r){const{extractWeights:l,getRemainingWeights:u}=Hn(r),p=[],{extractConvLayerParams:y,extractResidualLayerParams:g}=OQ(l,p),I=y(4704,32,7,"conv32_down"),S=g(9216,32,3,"conv32_1"),T=g(9216,32,3,"conv32_2"),C=g(9216,32,3,"conv32_3"),D=g(36864,64,3,"conv64_down",!0),_=g(36864,64,3,"conv64_1"),A=g(36864,64,3,"conv64_2"),B=g(36864,64,3,"conv64_3"),ne=g(147456,128,3,"conv128_down",!0),te=g(147456,128,3,"conv128_1"),P=g(147456,128,3,"conv128_2"),ge=g(589824,256,3,"conv256_down",!0),ae=g(589824,256,3,"conv256_1"),Le=g(589824,256,3,"conv256_2"),ve=g(589824,256,3,"conv256_down_out"),Ve=bs.tidy(()=>bs.transpose(bs.tensor2d(l(256*128),[128,256]),[1,0]));if(p.push({paramPath:"fc"}),u().length!==0)throw new Error(`weights remaing after extract: ${u().length}`);const at={conv32_down:I,conv32_1:S,conv32_2:T,conv32_3:C,conv64_down:D,conv64_1:_,conv64_2:A,conv64_3:B,conv128_down:ne,conv128_1:te,conv128_2:P,conv256_down:ge,conv256_1:ae,conv256_2:Le,conv256_down_out:ve,fc:Ve};return{params:at,paramMappings:p}}function EQ(r,l){const u=gs(r,l);function p(I){const S=u(`${I}/scale/weights`,1),T=u(`${I}/scale/biases`,1);return{weights:S,biases:T}}function y(I){const S=u(`${I}/conv/filters`,4),T=u(`${I}/conv/bias`,1),C=p(I);return{conv:{filters:S,bias:T},scale:C}}function g(I){return{conv1:y(`${I}/conv1`),conv2:y(`${I}/conv2`)}}return{extractConvLayerParams:y,extractResidualLayerParams:g}}function mD(r){const l=[],{extractConvLayerParams:u,extractResidualLayerParams:p}=EQ(r,l),y=u("conv32_down"),g=p("conv32_1"),I=p("conv32_2"),S=p("conv32_3"),T=p("conv64_down"),C=p("conv64_1"),D=p("conv64_2"),_=p("conv64_3"),A=p("conv128_down"),B=p("conv128_1"),ne=p("conv128_2"),te=p("conv256_down"),P=p("conv256_1"),ge=p("conv256_2"),ae=p("conv256_down_out"),Le=r.fc;if(l.push({originalPath:"fc",paramPath:"fc"}),!kS(Le))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${Le}`);const 
ve={conv32_down:y,conv32_1:g,conv32_2:I,conv32_3:S,conv64_down:T,conv64_1:C,conv64_2:D,conv64_3:_,conv128_down:A,conv128_1:B,conv128_2:ne,conv256_down:te,conv256_1:P,conv256_2:ge,conv256_down_out:ae,fc:Le};return Yn(r,l),{params:ve,paramMappings:l}}const qn=Je(Ze());function gi(r,l){let u=dD(r,l.conv1);return u=lx(u,l.conv2),u=qn.add(u,r),u=qn.relu(u),u}function zu(r,l){let u=Dg(r,l.conv1);u=lx(u,l.conv2);let p=qn.avgPool(r,2,2,"valid");const y=qn.zeros(p.shape),g=p.shape[3]!==u.shape[3],I=p.shape[1]!==u.shape[1]||p.shape[2]!==u.shape[2];if(I){const S=[...u.shape];S[1]=1;const T=qn.zeros(S);u=qn.concat([u,T],1);const C=[...u.shape];C[2]=1;const D=qn.zeros(C);u=qn.concat([u,D],2)}return p=g?qn.concat([p,y],3):p,u=qn.add(p,u),u=qn.relu(u),u}const Fs=Je(Ze());class Vu extends Wn{constructor(){super("FaceRecognitionNet")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceRecognitionNet - load model before inference");return Fs.tidy(()=>{const u=Fs.cast(r.toBatchTensor(150,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(Fs.scalar(256));let g=Dg(y,l.conv32_down);g=Fs.maxPool(g,3,2,"valid"),g=gi(g,l.conv32_1),g=gi(g,l.conv32_2),g=gi(g,l.conv32_3),g=zu(g,l.conv64_down),g=gi(g,l.conv64_1),g=gi(g,l.conv64_2),g=gi(g,l.conv64_3),g=zu(g,l.conv128_down),g=gi(g,l.conv128_1),g=gi(g,l.conv128_2),g=zu(g,l.conv256_down),g=gi(g,l.conv256_1),g=gi(g,l.conv256_2),g=zu(g,l.conv256_down_out);const I=g.mean([1,2]),S=Fs.matMul(I,l.fc);return S})}async forward(r){return this.forwardInput(await Wt(r))}async computeFaceDescriptor(r){const l=await Wt(r),u=Fs.tidy(()=>Fs.unstack(this.forwardInput(l))),p=await Promise.all(u.map(y=>y.data()));return u.forEach(y=>y.dispose()),l.isBatchInput?p:p[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(r){return mD(r)}extractParams(r){return pD(r)}}function DQ(r){const l=new Vu;return l.extractWeights(r),l}function kg(r,l){const u={descriptor:l};return Object.assign({},r,u)}function kQ(r){return typeof r.age=="number"}function Fg(r,l){const u={age:l};return Object.assign({},r,u)}function FQ(r){return(r.gender===Ir.MALE||r.gender===Ir.FEMALE)&&vc(r.genderProbability)}function _g(r,l,u){const p={gender:l,genderProbability:u};return Object.assign({},r,p)}const yi=Je(Ze());function _Q(r,l){function u(T,C){const D=yi.tensor4d(r(3*3*T),[3,3,T,1]),_=yi.tensor1d(r(T)),A=yi.tensor1d(r(T)),B=yi.tensor1d(r(T)),ne=yi.tensor1d(r(T));return l.push({paramPath:`${C}/filters`},{paramPath:`${C}/batch_norm_scale`},{paramPath:`${C}/batch_norm_offset`},{paramPath:`${C}/batch_norm_mean`},{paramPath:`${C}/batch_norm_variance`}),{filters:D,batch_norm_scale:_,batch_norm_offset:A,batch_norm_mean:B,batch_norm_variance:ne}}function p(T,C,D,_,A){const B=yi.tensor4d(r(T*C*D*D),[D,D,T,C]),ne=yi.tensor1d(r(C));return l.push({paramPath:`${_}/filters`},{paramPath:`${_}/${A?"batch_norm_offset":"bias"}`}),{filters:B,bias:ne}}function y(T,C,D,_){const{filters:A,bias:B}=p(T,C,D,_,!0);return{filters:A,batch_norm_offset:B}}function g(T,C,D){const _=u(T,`${D}/depthwise_conv`),A=y(T,C,1,`${D}/pointwise_conv`);return{depthwise_conv:_,pointwise_conv:A}}function I(){const 
T=y(3,32,3,"mobilenetv1/conv_0"),C=g(32,64,"mobilenetv1/conv_1"),D=g(64,128,"mobilenetv1/conv_2"),_=g(128,128,"mobilenetv1/conv_3"),A=g(128,256,"mobilenetv1/conv_4"),B=g(256,256,"mobilenetv1/conv_5"),ne=g(256,512,"mobilenetv1/conv_6"),te=g(512,512,"mobilenetv1/conv_7"),P=g(512,512,"mobilenetv1/conv_8"),ge=g(512,512,"mobilenetv1/conv_9"),ae=g(512,512,"mobilenetv1/conv_10"),Le=g(512,512,"mobilenetv1/conv_11"),ve=g(512,1024,"mobilenetv1/conv_12"),Ve=g(1024,1024,"mobilenetv1/conv_13");return{conv_0:T,conv_1:C,conv_2:D,conv_3:_,conv_4:A,conv_5:B,conv_6:ne,conv_7:te,conv_8:P,conv_9:ge,conv_10:ae,conv_11:Le,conv_12:ve,conv_13:Ve}}function S(){const T=y(1024,256,1,"prediction_layer/conv_0"),C=y(256,512,3,"prediction_layer/conv_1"),D=y(512,128,1,"prediction_layer/conv_2"),_=y(128,256,3,"prediction_layer/conv_3"),A=y(256,128,1,"prediction_layer/conv_4"),B=y(128,256,3,"prediction_layer/conv_5"),ne=y(256,64,1,"prediction_layer/conv_6"),te=y(64,128,3,"prediction_layer/conv_7"),P=p(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),ge=p(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),ae=p(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),Le=p(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),ve=p(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Ve=p(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),at=p(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),pt=p(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),$t=p(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Vt=p(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),qe=p(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),ln=p(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),bt={box_encoding_predictor:P,class_predictor:ge},ws={box_encoding_predictor:ae,class_predictor:Le},Nr={box_encoding_predictor:ve,class_predictor:Ve},Cr={box_encoding_predictor:at,class_predictor:pt},ba={box_encoding_predictor:$t,class_predictor:Vt},hn={box_encoding_predictor:qe,class_predictor:ln};return{conv_0:T,conv_1:C,conv_2:D,conv_3:_,conv_4:A,conv_5:B,conv_6:ne,conv_7:te,box_predictor_0:bt,box_predictor_1:ws,box_predictor_2:Nr,box_predictor_3:Cr,box_predictor_4:ba,box_predictor_5:hn}}return{extractMobilenetV1Params:I,extractPredictionLayerParams:S}}function fD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractMobilenetV1Params:y,extractPredictionLayerParams:g}=_Q(u,l),I=y(),S=g(),T=yi.tensor3d(u(5118*4),[1,5118,4]),C={extra_dim:T};if(l.push({paramPath:"output_layer/extra_dim"}),p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{params:{mobilenetv1:I,prediction_layer:S,output_layer:C},paramMappings:l}}function WQ(r,l){const u=gs(r,l);function p(C,D,_){const A=u(`${C}/Conv2d_${D}_pointwise/weights`,4,`${_}/filters`),B=u(`${C}/Conv2d_${D}_pointwise/convolution_bn_offset`,1,`${_}/batch_norm_offset`);return{filters:A,batch_norm_offset:B}}function y(C){const 
D=`mobilenetv1/conv_${C}`,_=`MobilenetV1/Conv2d_${C}_depthwise`,A=`${D}/depthwise_conv`,B=`${D}/pointwise_conv`,ne=u(`${_}/depthwise_weights`,4,`${A}/filters`),te=u(`${_}/BatchNorm/gamma`,1,`${A}/batch_norm_scale`),P=u(`${_}/BatchNorm/beta`,1,`${A}/batch_norm_offset`),ge=u(`${_}/BatchNorm/moving_mean`,1,`${A}/batch_norm_mean`),ae=u(`${_}/BatchNorm/moving_variance`,1,`${A}/batch_norm_variance`);return{depthwise_conv:{filters:ne,batch_norm_scale:te,batch_norm_offset:P,batch_norm_mean:ge,batch_norm_variance:ae},pointwise_conv:p("MobilenetV1",C,B)}}function g(){return{conv_0:p("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:y(1),conv_2:y(2),conv_3:y(3),conv_4:y(4),conv_5:y(5),conv_6:y(6),conv_7:y(7),conv_8:y(8),conv_9:y(9),conv_10:y(10),conv_11:y(11),conv_12:y(12),conv_13:y(13)}}function I(C,D){const _=u(`${C}/weights`,4,`${D}/filters`),A=u(`${C}/biases`,1,`${D}/bias`);return{filters:_,bias:A}}function S(C){const D=I(`Prediction/BoxPredictor_${C}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${C}/box_encoding_predictor`),_=I(`Prediction/BoxPredictor_${C}/ClassPredictor`,`prediction_layer/box_predictor_${C}/class_predictor`);return{box_encoding_predictor:D,class_predictor:_}}function T(){return{conv_0:p("Prediction",0,"prediction_layer/conv_0"),conv_1:p("Prediction",1,"prediction_layer/conv_1"),conv_2:p("Prediction",2,"prediction_layer/conv_2"),conv_3:p("Prediction",3,"prediction_layer/conv_3"),conv_4:p("Prediction",4,"prediction_layer/conv_4"),conv_5:p("Prediction",5,"prediction_layer/conv_5"),conv_6:p("Prediction",6,"prediction_layer/conv_6"),conv_7:p("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:S(0),box_predictor_1:S(1),box_predictor_2:S(2),box_predictor_3:S(3),box_predictor_4:S(4),box_predictor_5:S(5)}}return{extractMobilenetV1Params:g,extractPredictionLayerParams:T}}function gD(r){const l=[],{extractMobilenetV1Params:u,extractPredictionLayerParams:p}=WQ(r,l),y=r["Output/extra_dim"];if(l.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!gr(y))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${y}`);const g={mobilenetv1:u(),prediction_layer:p(),output_layer:{extra_dim:y}};return Yn(r,l),{params:g,paramMappings:l}}const fo=Je(Ze());function qs(r,l,u){return fo.tidy(()=>{let p=fo.conv2d(r,l.filters,u,"same");return p=fo.add(p,l.batch_norm_offset),fo.clipByValue(p,0,6)})}const xr=Je(Ze()),$Q=.0010000000474974513;function UQ(r,l,u){return xr.tidy(()=>{let p=xr.depthwiseConv2d(r,l.filters,u,"same");return p=xr.batchNorm(p,l.batch_norm_mean,l.batch_norm_variance,l.batch_norm_offset,l.batch_norm_scale,$Q),xr.clipByValue(p,0,6)})}function BQ(r){return[2,4,6,12].some(l=>l===r)?[2,2]:[1,1]}function yD(r,l){return xr.tidy(()=>{let u,p=qs(r,l.conv_0,[2,2]);const y=[l.conv_1,l.conv_2,l.conv_3,l.conv_4,l.conv_5,l.conv_6,l.conv_7,l.conv_8,l.conv_9,l.conv_10,l.conv_11,l.conv_12,l.conv_13];if(y.forEach((g,I)=>{const S=I+1,T=BQ(S);p=UQ(p,g.depthwise_conv,T),p=qs(p,g.pointwise_conv,[1,1]),S===11&&(u=p)}),u===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:p,conv11:u}})}function bD(r,l,u,p,y){const g=r.shape[0],I=Math.min(u,g),S=l.map((D,_)=>({score:D,boxIndex:_})).filter(D=>D.score>y).sort((D,_)=>_.score-D.score),T=D=>D<=p?1:0,C=[];return S.forEach(D=>{if(C.length>=I)return;const _=D.score;for(let A=C.length-1;A>=0;--A){const B=MQ(r,D.boxIndex,C[A]);if(B===0)continue;if(D.score*=T(B),D.score<=y)break}_===D.score&&C.push(D.boxIndex)}),C}function MQ(r,l,u){const 
p=r.arraySync(),y=Math.min(p[l][0],p[l][2]),g=Math.min(p[l][1],p[l][3]),I=Math.max(p[l][0],p[l][2]),S=Math.max(p[l][1],p[l][3]),T=Math.min(p[u][0],p[u][2]),C=Math.min(p[u][1],p[u][3]),D=Math.max(p[u][0],p[u][2]),_=Math.max(p[u][1],p[u][3]),A=(I-y)*(S-g),B=(D-T)*(_-C);if(A<=0||B<=0)return 0;const ne=Math.max(y,T),te=Math.max(g,C),P=Math.min(I,D),ge=Math.min(S,_),ae=Math.max(P-ne,0)*Math.max(ge-te,0);return ae/(A+B-ae)}const ke=Je(Ze());function PQ(r){const l=ke.unstack(ke.transpose(r,[1,0])),u=[ke.sub(l[2],l[0]),ke.sub(l[3],l[1])],p=[ke.add(l[0],ke.div(u[0],ke.scalar(2))),ke.add(l[1],ke.div(u[1],ke.scalar(2)))];return{sizes:u,centers:p}}function zQ(r,l){const{sizes:u,centers:p}=PQ(r),y=ke.unstack(ke.transpose(l,[1,0])),g=ke.div(ke.mul(ke.exp(ke.div(y[2],ke.scalar(5))),u[0]),ke.scalar(2)),I=ke.add(ke.mul(ke.div(y[0],ke.scalar(10)),u[0]),p[0]),S=ke.div(ke.mul(ke.exp(ke.div(y[3],ke.scalar(5))),u[1]),ke.scalar(2)),T=ke.add(ke.mul(ke.div(y[1],ke.scalar(10)),u[1]),p[1]);return ke.transpose(ke.stack([ke.sub(I,g),ke.sub(T,S),ke.add(I,g),ke.add(T,S)]),[1,0])}function wD(r,l,u){return ke.tidy(()=>{const p=r.shape[0];let y=zQ(ke.reshape(ke.tile(u.extra_dim,[p,1,1]),[-1,4]),ke.reshape(r,[-1,4]));y=ke.reshape(y,[p,y.shape[0]/p,4]);const g=ke.sigmoid(ke.slice(l,[0,0,1],[-1,-1,-1]));let I=ke.slice(g,[0,0,0],[-1,-1,1]);I=ke.reshape(I,[p,I.shape[1]]);const S=ke.unstack(y),T=ke.unstack(I);return{boxes:S,scores:T}})}const Gu=Je(Ze());function ma(r,l){return Gu.tidy(()=>{const u=r.shape[0],p=Gu.reshape(ua(r,l.box_encoding_predictor),[u,-1,1,4]),y=Gu.reshape(ua(r,l.class_predictor),[u,-1,3]);return{boxPredictionEncoding:p,classPrediction:y}})}const Yu=Je(Ze());function LD(r,l,u){return Yu.tidy(()=>{const p=qs(r,u.conv_0,[1,1]),y=qs(p,u.conv_1,[2,2]),g=qs(y,u.conv_2,[1,1]),I=qs(g,u.conv_3,[2,2]),S=qs(I,u.conv_4,[1,1]),T=qs(S,u.conv_5,[2,2]),C=qs(T,u.conv_6,[1,1]),D=qs(C,u.conv_7,[2,2]),_=ma(l,u.box_predictor_0),A=ma(r,u.box_predictor_1),B=ma(y,u.box_predictor_2),ne=ma(I,u.box_predictor_3),te=ma(T,u.box_predictor_4),P=ma(D,u.box_predictor_5),ge=Yu.concat([_.boxPredictionEncoding,A.boxPredictionEncoding,B.boxPredictionEncoding,ne.boxPredictionEncoding,te.boxPredictionEncoding,P.boxPredictionEncoding],1),ae=Yu.concat([_.classPrediction,A.classPrediction,B.classPrediction,ne.classPrediction,te.classPrediction,P.classPrediction],1);return{boxPredictions:ge,classPredictions:ae}})}class bi{constructor({minConfidence:r,maxResults:l}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=r||.5,this._maxResults=l||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}}const wi=Je(Ze());class Zc extends Wn{constructor(){super("SsdMobilenetv1")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("SsdMobilenetv1 - load model before inference");return wi.tidy(()=>{const u=wi.cast(r.toBatchTensor(512,!1),"float32"),p=wi.sub(wi.mul(u,wi.scalar(.007843137718737125)),wi.scalar(1)),y=yD(p,l.mobilenetv1),{boxPredictions:g,classPredictions:I}=LD(y.out,y.conv11,l.prediction_layer);return wD(g,I,l.output_layer)})}async forward(r){return this.forwardInput(await Wt(r))}async locateFaces(r,l={}){const{maxResults:u,minConfidence:p}=new bi(l),y=await 
Wt(r),{boxes:g,scores:I}=this.forwardInput(y),S=g[0],T=I[0];for(let ae=1;ae{const[Le,ve]=[Math.max(0,P[ae][0]),Math.min(1,P[ae][2])].map(pt=>pt*te),[Ve,at]=[Math.max(0,P[ae][1]),Math.min(1,P[ae][3])].map(pt=>pt*ne);return new Jt(C[ae],new bu(Ve,Le,at-Ve,ve-Le),{height:y.getInputHeight(0),width:y.getInputWidth(0)})});return S.dispose(),T.dispose(),ge}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(r){return gD(r)}extractParams(r){return fD(r)}}function SD(r){const l=new Zc;return l.extractWeights(r),l}function VQ(r){return SD(r)}class GQ extends Zc{}const ID=.4,xD=[new Qe(.738768,.874946),new Qe(2.42204,2.65704),new Qe(4.30971,7.04493),new Qe(10.246,4.59428),new Qe(12.6868,11.8741)],TD=[new Qe(1.603231,2.094468),new Qe(6.041143,7.080126),new Qe(2.882459,3.518061),new Qe(4.266906,5.178857),new Qe(9.041765,10.66308)],AD=[117.001,114.697,97.404],vD="tiny_yolov2_model",ND="tiny_yolov2_separable_conv_model";const Wg=r=>typeof r=="number";function hx(r){if(!r)throw new Error(`invalid config: ${r}`);if(typeof r.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${r.withSeparableConvs}`);if(!Wg(r.iouThreshold)||r.iouThreshold<0||r.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${r.iouThreshold}`);if(!Array.isArray(r.classes)||!r.classes.length||!r.classes.every(l=>typeof l=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(r.classes)}`);if(!Array.isArray(r.anchors)||!r.anchors.length||!r.anchors.map(l=>l||{}).every(l=>Wg(l.x)&&Wg(l.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(r.anchors)}`);if(r.meanRgb&&(!Array.isArray(r.meanRgb)||r.meanRgb.length!==3||!r.meanRgb.every(Wg)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(r.meanRgb)}`)}const js=Je(Ze());function Qc(r){return js.tidy(()=>{const l=js.mul(r,js.scalar(.10000000149011612));return js.add(js.relu(js.sub(r,l)),l)})}const Ks=Je(Ze());function Tr(r,l){return Ks.tidy(()=>{let u=Ks.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=Ks.conv2d(u,l.conv.filters,[1,1],"valid"),u=Ks.sub(u,l.bn.sub),u=Ks.mul(u,l.bn.truediv),u=Ks.add(u,l.conv.bias),Qc(u)})}const go=Je(Ze());function Ar(r,l){return go.tidy(()=>{let u=go.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=go.separableConv2d(u,l.depthwise_filter,l.pointwise_filter,[1,1],"valid"),u=go.add(u,l.bias),Qc(u)})}const ux=Je(Ze());function YQ(r,l){const u=Yc(r,l);function p(I,S){const T=ux.tensor1d(r(I)),C=ux.tensor1d(r(I));return l.push({paramPath:`${S}/sub`},{paramPath:`${S}/truediv`}),{sub:T,truediv:C}}function y(I,S,T){const C=u(I,S,3,`${T}/conv`),D=p(S,`${T}/bn`);return{conv:C,bn:D}}const g=Hc(r,l);return{extractConvParams:u,extractConvWithBatchNormParams:y,extractSeparableConvParams:g}}function CD(r,l,u,p){const{extractWeights:y,getRemainingWeights:g}=Hn(r),I=[],{extractConvParams:S,extractConvWithBatchNormParams:T,extractSeparableConvParams:C}=YQ(y,I);let D;if(l.withSeparableConvs){const[_,A,B,ne,te,P,ge,ae,Le]=p,ve=l.isFirstLayerConv2d?S(_,A,3,"conv0"):C(_,A,"conv0"),Ve=C(A,B,"conv1"),at=C(B,ne,"conv2"),pt=C(ne,te,"conv3"),$t=C(te,P,"conv4"),Vt=C(P,ge,"conv5"),qe=ae?C(ge,ae,"conv6"):void 0,ln=Le?C(ae,Le,"conv7"):void 
0,bt=S(Le||ae||ge,5*u,1,"conv8");D={conv0:ve,conv1:Ve,conv2:at,conv3:pt,conv4:$t,conv5:Vt,conv6:qe,conv7:ln,conv8:bt}}else{const[_,A,B,ne,te,P,ge,ae,Le]=p,ve=T(_,A,"conv0"),Ve=T(A,B,"conv1"),at=T(B,ne,"conv2"),pt=T(ne,te,"conv3"),$t=T(te,P,"conv4"),Vt=T(P,ge,"conv5"),qe=T(ge,ae,"conv6"),ln=T(ae,Le,"conv7"),bt=S(Le,5*u,1,"conv8");D={conv0:ve,conv1:Ve,conv2:at,conv3:pt,conv4:$t,conv5:Vt,conv6:qe,conv7:ln,conv8:bt}}if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{params:D,paramMappings:I}}function HQ(r,l){const u=gs(r,l);function p(S){const T=u(`${S}/sub`,1),C=u(`${S}/truediv`,1);return{sub:T,truediv:C}}function y(S){const T=u(`${S}/filters`,4),C=u(`${S}/bias`,1);return{filters:T,bias:C}}function g(S){const T=y(`${S}/conv`),C=p(`${S}/bn`);return{conv:T,bn:C}}const I=qc(u);return{extractConvParams:y,extractConvWithBatchNormParams:g,extractSeparableConvParams:I}}function RD(r,l){const u=[],{extractConvParams:p,extractConvWithBatchNormParams:y,extractSeparableConvParams:g}=HQ(r,u);let I;if(l.withSeparableConvs){const S=l.filterSizes&&l.filterSizes.length||9;I={conv0:l.isFirstLayerConv2d?p("conv0"):g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:S>7?g("conv6"):void 0,conv7:S>8?g("conv7"):void 0,conv8:p("conv8")}}else I={conv0:y("conv0"),conv1:y("conv1"),conv2:y("conv2"),conv3:y("conv3"),conv4:y("conv4"),conv5:y("conv5"),conv6:y("conv6"),conv7:y("conv7"),conv8:p("conv8")};return Yn(r,u),{params:I,paramMappings:u}}var dx;(function(r){r[r.XS=224]="XS",r[r.SM=320]="SM",r[r.MD=416]="MD",r[r.LG=608]="LG"})(dx||(dx={}));class vr{constructor({inputSize:r,scoreThreshold:l}={}){this._name="TinyYolov2Options";if(this._inputSize=r||416,this._scoreThreshold=l||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}}const Mt=Je(Ze());class el extends Wn{constructor(r){super("TinyYolov2");hx(r),this._config=r}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(r,l){let u=Tr(r,l.conv0);return u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv1),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv2),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv3),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv4),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv5),u=Mt.maxPool(u,[2,2],[1,1],"same"),u=Tr(u,l.conv6),u=Tr(u,l.conv7),ua(u,l.conv8,"valid",!1)}runMobilenet(r,l){let u=this.config.isFirstLayerConv2d?Qc(ua(r,l.conv0,"valid",!1)):Ar(r,l.conv0);return u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv1),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv2),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv3),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv4),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv5),u=Mt.maxPool(u,[2,2],[1,1],"same"),u=l.conv6?Ar(u,l.conv6):u,u=l.conv7?Ar(u,l.conv7):u,ua(u,l.conv8,"valid",!1)}forwardInput(r,l){const{params:u}=this;if(!u)throw new Error("TinyYolov2 - load model before inference");return Mt.tidy(()=>{let p=Mt.cast(r.toBatchTensor(l,!1),"float32");return 
p=this.config.meanRgb?di(p,this.config.meanRgb):p,p=p.div(Mt.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(p,u):this.runTinyYolov2(p,u)})}async forward(r,l){return await this.forwardInput(await Wt(r),l)}async detect(r,l={}){const{inputSize:u,scoreThreshold:p}=new vr(l),y=await Wt(r),g=await this.forwardInput(y,u),I=Mt.tidy(()=>Mt.unstack(g)[0].expandDims()),S={width:y.getInputWidth(0),height:y.getInputHeight(0)},T=await this.extractBoxes(I,y.getReshapedInputDimensions(0),p);g.dispose(),I.dispose();const C=T.map(te=>te.box),D=T.map(te=>te.score),_=T.map(te=>te.classScore),A=T.map(te=>this.config.classes[te.label]),B=US(C.map(te=>te.rescale(u)),D,this.config.iouThreshold,!0),ne=B.map(te=>new Nc(D[te],_[te],A[te],C[te],S));return ne}getDefaultModelName(){return""}extractParamsFromWeigthMap(r){return RD(r,this.config)}extractParams(r){const l=this.config.filterSizes||el.DEFAULT_FILTER_SIZES,u=l?l.length:void 0;if(u!==7&&u!==8&&u!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${u} filterSizes in config`);return CD(r,this.config,this.boxEncodingSize,l)}async extractBoxes(r,l,u){const{width:p,height:y}=l,g=Math.max(p,y),I=g/p,S=g/y,T=r.shape[1],C=this.config.anchors.length,[D,_,A]=Mt.tidy(()=>{const P=r.reshape([T,T,C,this.boxEncodingSize]),ge=P.slice([0,0,0,0],[T,T,C,4]),ae=P.slice([0,0,0,4],[T,T,C,1]),Le=this.withClassScores?Mt.softmax(P.slice([0,0,0,5],[T,T,C,this.config.classes.length]),3):Mt.scalar(0);return[ge,ae,Le]}),B=[],ne=await _.array(),te=await D.array();for(let P=0;Pu){const ve=(ge+yu(te[P][ge][ae][0]))/T*I,Ve=(P+yu(te[P][ge][ae][1]))/T*S,at=Math.exp(te[P][ge][ae][2])*this.config.anchors[ae].x/T*I,pt=Math.exp(te[P][ge][ae][3])*this.config.anchors[ae].y/T*S,$t=ve-at/2,Vt=Ve-pt/2,qe={row:P,col:ge,anchor:ae},{classScore:ln,label:bt}=this.withClassScores?await this.extractPredictedClass(A,qe):{classScore:1,label:0};B.push({box:new gu($t,Vt,$t+at,Vt+pt),score:Le,classScore:Le*ln,label:bt,...qe})}}return D.dispose(),_.dispose(),A.dispose(),B}async extractPredictedClass(r,l){const{row:u,col:p,anchor:y}=l,g=await r.array();return Array(this.config.classes.length).fill(0).map((I,S)=>g[u][p][y][S]).map((I,S)=>({classScore:I,label:S})).reduce((I,S)=>I.classScore>S.classScore?I:S)}}el.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];class Hu extends el{constructor(r=!0){const l=Object.assign({},{withSeparableConvs:r,iouThreshold:ID,classes:["face"]},r?{anchors:TD,meanRgb:AD}:{anchors:xD,withClassScores:!0});super(l)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Jt(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?ND:vD}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}function qQ(r,l=!0){const u=new Hu(l);return u.extractWeights(r),u}class px extends vr{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}}class Li{async then(r){return r(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}}const mx=Je(Ze());async function fa(r,l,u,p,y=({alignedRect:g})=>g){const g=r.map(T=>pa(T)?y(T):T.detection),I=p||(l instanceof mx.Tensor?await Vc(l,g):await zc(l,g)),S=await u(I);return I.forEach(T=>T instanceof mx.Tensor&&T.dispose()),S}async function tl(r,l,u,p,y){return fa([r],l,async g=>u(g[0]),p,y)}const OD=.4,ED=[new Qe(1.603231,2.094468),new 
Qe(6.041143,7.080126),new Qe(2.882459,3.518061),new Qe(4.266906,5.178857),new Qe(9.041765,10.66308)],DD=[117.001,114.697,97.404];class qu extends el{constructor(){const r={withSeparableConvs:!0,iouThreshold:OD,classes:["face"],anchors:ED,meanRgb:DD,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(r)}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Jt(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}const yt={ssdMobilenetv1:new Zc,tinyFaceDetector:new qu,tinyYolov2:new Hu,faceLandmark68Net:new Pu,faceLandmark68TinyNet:new ax,faceRecognitionNet:new Vu,faceExpressionNet:new nx,ageGenderNet:new ox},kD=(r,l)=>yt.ssdMobilenetv1.locateFaces(r,l),jQ=(r,l)=>yt.tinyFaceDetector.locateFaces(r,l),KQ=(r,l)=>yt.tinyYolov2.locateFaces(r,l),FD=r=>yt.faceLandmark68Net.detectLandmarks(r),XQ=r=>yt.faceLandmark68TinyNet.detectLandmarks(r),JQ=r=>yt.faceRecognitionNet.computeFaceDescriptor(r),ZQ=r=>yt.faceExpressionNet.predictExpressions(r),QQ=r=>yt.ageGenderNet.predictAgeAndGender(r),_D=r=>yt.ssdMobilenetv1.load(r),eee=r=>yt.tinyFaceDetector.load(r),tee=r=>yt.tinyYolov2.load(r),nee=r=>yt.faceLandmark68Net.load(r),see=r=>yt.faceLandmark68TinyNet.load(r),iee=r=>yt.faceRecognitionNet.load(r),ree=r=>yt.faceExpressionNet.load(r),oee=r=>yt.ageGenderNet.load(r),aee=_D,cee=kD,lee=FD;class WD extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class Xu extends WD{async run(){const r=await this.parentTask,l=await fa(r,this.input,async u=>await Promise.all(u.map(p=>yt.faceExpressionNet.predictExpressions(p))),this.extractedFaces);return r.map((u,p)=>Rg(u,l[p]))}withAgeAndGender(){return new ju(this,this.input)}}class Ju extends WD{async run(){const r=await this.parentTask;if(!r)return;const l=await tl(r,this.input,u=>yt.faceExpressionNet.predictExpressions(u),this.extractedFaces);return Rg(r,l)}withAgeAndGender(){return new Ku(this,this.input)}}class il extends Xu{withAgeAndGender(){return new nl(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class rl extends Ju{withAgeAndGender(){return new sl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class $D extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class ju extends $D{async run(){const r=await this.parentTask,l=await fa(r,this.input,async u=>await Promise.all(u.map(p=>yt.ageGenderNet.predictAgeAndGender(p))),this.extractedFaces);return r.map((u,p)=>{const{age:y,gender:g,genderProbability:I}=l[p];return Fg(_g(u,g,I),y)})}withFaceExpressions(){return new Xu(this,this.input)}}class Ku extends $D{async run(){const r=await this.parentTask;if(!r)return;const{age:l,gender:u,genderProbability:p}=await tl(r,this.input,y=>yt.ageGenderNet.predictAgeAndGender(y),this.extractedFaces);return Fg(_g(r,u,p),l)}withFaceExpressions(){return new Ju(this,this.input)}}class nl extends ju{withFaceExpressions(){return new il(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class sl extends Ku{withFaceExpressions(){return new rl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class fx extends Li{constructor(r,l){super();this.parentTask=r;this.input=l}}class ga extends fx{async run(){const r=await this.parentTask,l=await 
fa(r,this.input,u=>Promise.all(u.map(p=>yt.faceRecognitionNet.computeFaceDescriptor(p))),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return l.map((u,p)=>kg(r[p],u))}withFaceExpressions(){return new il(this,this.input)}withAgeAndGender(){return new nl(this,this.input)}}class ya extends fx{async run(){const r=await this.parentTask;if(!r)return;const l=await tl(r,this.input,u=>yt.faceRecognitionNet.computeFaceDescriptor(u),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return kg(r,l)}withFaceExpressions(){return new rl(this,this.input)}withAgeAndGender(){return new sl(this,this.input)}}const Zu=Je(Ze());class gx extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.useTinyLandmarkNet=u}get landmarkNet(){return this.useTinyLandmarkNet?yt.faceLandmark68TinyNet:yt.faceLandmark68Net}}class yx extends gx{async run(){const r=await this.parentTask,l=r.map(y=>y.detection),u=this.input instanceof Zu.Tensor?await Vc(this.input,l):await zc(this.input,l),p=await Promise.all(u.map(y=>this.landmarkNet.detectLandmarks(y)));return u.forEach(y=>y instanceof Zu.Tensor&&y.dispose()),r.map((y,g)=>Xc(y,p[g]))}withFaceExpressions(){return new il(this,this.input)}withAgeAndGender(){return new nl(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class bx extends gx{async run(){const r=await this.parentTask;if(!r)return;const{detection:l}=r,u=this.input instanceof Zu.Tensor?await Vc(this.input,[l]):await zc(this.input,[l]),p=await this.landmarkNet.detectLandmarks(u[0]);return u.forEach(y=>y instanceof Zu.Tensor&&y.dispose()),Xc(r,p)}withFaceExpressions(){return new rl(this,this.input)}withAgeAndGender(){return new sl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class wx extends Li{constructor(r,l=new bi){super();this.input=r;this.options=l}}class $g extends wx{async run(){const{input:r,options:l}=this,u=l instanceof px?p=>yt.tinyFaceDetector.locateFaces(p,l):l instanceof bi?p=>yt.ssdMobilenetv1.locateFaces(p,l):l instanceof vr?p=>yt.tinyYolov2.locateFaces(p,l):null;if(!u)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return u(r)}runAndExtendWithFaceDetections(){return new Promise(async r=>{const l=await this.run();return r(l.map(u=>Zo({},u)))})}withFaceLandmarks(r=!1){return new yx(this.runAndExtendWithFaceDetections(),this.input,r)}withFaceExpressions(){return new Xu(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new ju(this.runAndExtendWithFaceDetections(),this.input)}}class Lx extends wx{async run(){const r=await new $g(this.input,this.options);let l=r[0];return r.forEach(u=>{u.score>l.score&&(l=u)}),l}runAndExtendWithFaceDetection(){return new Promise(async r=>{const l=await this.run();return r(l?Zo({},l):void 0)})}withFaceLandmarks(r=!1){return new bx(this.runAndExtendWithFaceDetection(),this.input,r)}withFaceExpressions(){return new Ju(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new Ku(this.runAndExtendWithFaceDetection(),this.input)}}function hee(r,l=new bi){return new Lx(r,l)}function Ug(r,l=new bi){return new $g(r,l)}async function UD(r,l){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await Ug(r,new bi(l?{minConfidence:l}:{})).withFaceLandmarks().withFaceDescriptors()}async function uee(r,l={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level 
api instead"),await Ug(r,new vr(l)).withFaceLandmarks().withFaceDescriptors()}const dee=UD;function Sx(r,l){if(r.length!==l.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");const u=Array.from(r),p=Array.from(l);return Math.sqrt(u.map((y,g)=>y-p[g]).reduce((y,g)=>y+Math.pow(g,2),0))}class BD{constructor(r,l=.6){this._distanceThreshold=l;const u=Array.isArray(r)?r:[r];if(!u.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let p=1;const y=()=>`person ${p++}`;this._labeledDescriptors=u.map(g=>{if(g instanceof Jo)return g;if(g instanceof Float32Array)return new Jo(y(),[g]);if(g.descriptor&&g.descriptor instanceof Float32Array)return new Jo(y(),[g.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(r,l){return l.map(u=>Sx(u,r)).reduce((u,p)=>u+p,0)/(l.length||1)}matchDescriptor(r){return this.labeledDescriptors.map(({descriptors:l,label:u})=>new Ym(u,this.computeMeanDistance(r,l))).reduce((l,u)=>l.distancer.toJSON())}}static fromJSON(r){const l=r.labeledDescriptors.map(u=>Jo.fromJSON(u));return new BD(l,r.distanceThreshold)}}function pee(r){const l=new qu;return l.extractWeights(r),l}function MD(r,l){const{width:u,height:p}=new ms(l.width,l.height);if(u<=0||p<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:u,height:p})}`);if(Array.isArray(r))return r.map(y=>MD(y,{width:u,height:p}));if(pa(r)){const y=r.detection.forSize(u,p),g=r.unshiftedLandmarks.forSize(y.box.width,y.box.height);return Xc(Zo(r,y),g)}return $i(r)?Zo(r,r.detection.forSize(u,p)):r instanceof Gs||r instanceof Jt?r.forSize(u,p):r}var PD="0.8.5";const mee=Je(Ze()),fee=typeof process!="undefined",gee=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",yee={faceapi:PD,node:fee,browser:gee};export{ox as AgeGenderNet,gu as BoundingBox,_t as Box,Li as ComposableTask,ga as ComputeAllFaceDescriptorsTask,fx as ComputeFaceDescriptorsTaskBase,ya as ComputeSingleFaceDescriptorTask,yx as DetectAllFaceLandmarksTask,$g as DetectAllFacesTask,gx as DetectFaceLandmarksTaskBase,wx as DetectFacesTaskBase,bx as DetectSingleFaceLandmarksTask,Lx as DetectSingleFaceTask,ms as Dimensions,tx as FACE_EXPRESSION_LABELS,Jt as FaceDetection,GQ as FaceDetectionNet,nx as FaceExpressionNet,da as FaceExpressions,Pu as FaceLandmark68Net,ax as FaceLandmark68TinyNet,RQ as FaceLandmarkNet,Gs as FaceLandmarks,EJ as FaceLandmarks5,wu as FaceLandmarks68,Ym as FaceMatch,BD as FaceMatcher,Vu as FaceRecognitionNet,Ir as Gender,Hm as LabeledBox,Jo as LabeledFaceDescriptors,ho as NetInput,Wn as NeuralNetwork,Nc as ObjectDetection,Qe as Point,DJ as PredictedBox,bu as Rect,Zc as SsdMobilenetv1,bi as SsdMobilenetv1Options,qu as TinyFaceDetector,px as TinyFaceDetectorOptions,Hu as TinyYolov2,vr as TinyYolov2Options,dx as TinyYolov2SizeType,dee as allFaces,UD as allFacesSsdMobilenetv1,uee as allFacesTinyYolov2,qS as awaitMediaLoaded,jS as bufferToImage,JQ as computeFaceDescriptor,Rc as createCanvas,Su as createCanvasFromMedia,VQ as createFaceDetectionNet,DQ as createFaceRecognitionNet,SD as createSsdMobilenetv1,pee as createTinyFaceDetector,qQ as createTinyYolov2,Ug as detectAllFaces,FD as detectFaceLandmarks,XQ as detectFaceLandmarksTiny,lee as detectLandmarks,hee as detectSingleFace,ix as draw,St as env,Sx as 
euclideanDistance,Fg as extendWithAge,kg as extendWithFaceDescriptor,Zo as extendWithFaceDetection,Rg as extendWithFaceExpressions,Xc as extendWithFaceLandmarks,_g as extendWithGender,Vc as extractFaceTensors,zc as extractFaces,SQ as fetchImage,ZI as fetchJson,IQ as fetchNetWeights,ha as fetchOrThrow,is as getContext2dOrThrow,ea as getMediaDimensions,KS as imageTensorToCanvas,JI as imageToSquare,NJ as inverseSigmoid,WS as iou,Xm as isMediaElement,Lu as isMediaLoaded,kQ as isWithAge,$i as isWithFaceDetection,sx as isWithFaceExpressions,pa as isWithFaceLandmarks,FQ as isWithGender,oee as loadAgeGenderModel,aee as loadFaceDetectionModel,ree as loadFaceExpressionModel,nee as loadFaceLandmarkModel,see as loadFaceLandmarkTinyModel,iee as loadFaceRecognitionModel,_D as loadSsdMobilenetv1Model,eee as loadTinyFaceDetectorModel,tee as loadTinyYolov2Model,QI as loadWeightMap,cee as locateFaces,xQ as matchDimensions,$S as minBbox,yt as nets,US as nonMaxSuppression,di as normalize,BS as padToSquare,QQ as predictAgeAndGender,ZQ as recognizeFaceExpressions,MD as resizeResults,Qo as resolveInput,vJ as shuffleArray,yu as sigmoid,kD as ssdMobilenetv1,mee as tf,jQ as tinyFaceDetector,KQ as tinyYolov2,Wt as toNetInput,DS as utils,hx as validateConfig,yee as version}; /** * @license * Copyright 2017 Google LLC. All Rights Reserved. diff --git a/dist/face-api.esm.js.map b/dist/face-api.esm.js.map index 6fcb61a..2ae77d2 100644 --- a/dist/face-api.esm.js.map +++ b/dist/face-api.esm.js.map @@ -1,7 +1,7 @@ { "version": 3, - "sources": ["node_modules/node-fetch/browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/environment.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/global_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_names.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/profiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_format.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/engine.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/device_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/flags.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/operation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/model_management.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/print.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/progress.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/http.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/test_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/globals.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/any.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/diag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/eye.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fill.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/square.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moments.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/alea.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor128.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorwow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorshift7.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor4096.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/tychei.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/seedrandom.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/range.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unsorted_segment_sum.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/variable.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/compare.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/binary_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hamming_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hann_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/frame.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/stft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/crop_and_resize.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/flip_left_right.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/rotate_with_offset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/array_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/band_part.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/gram_schmidt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/qr.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/compute_weighted_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/absolute_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/cosine_distance.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/hinge_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/huber_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/log_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/mean_squared_error.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/sigmoid_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/softmax_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adadelta_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adagrad_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adam_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adamax_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/sgd_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/momentum_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/rmsprop_optimizer.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer_constructors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/train.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/browser_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rotate_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/array_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/complex_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/split_shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/tile_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/topk_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Abs_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cast_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Ceil_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ClipByValue_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Dilation2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Erf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Exp_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Expm1_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Floor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GatherV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsFinite_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsInf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsNan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log1p_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LogSoftmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/min_max_grad_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Min_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Negate_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OnesLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Prelu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reciprocal_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reshape_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeBilinear_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeNearestNeighbor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reverse_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Round_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Rsqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SelectV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sigmoid_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sign_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Slice_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softplus_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Step_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Unpack_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/UnsortedSegmentSum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ZerosLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/any.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_type.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as3d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batch_to_space_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2D_deprecated.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal_strict.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/flatten.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floorDiv.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_and.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu6.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape_as.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tan.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_bool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_float.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_int.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unsorted_segment_sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/register_all_chained_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/errors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/generic_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/math_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/tfjs_backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/initializer_config.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/state.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/types_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/variable_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/variables.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/topology.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/input_layer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/logs.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/base_callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/losses.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/optimizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/user_defined_metadata.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/layer_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/serialization_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/container.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_tensors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/advanced_activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/conv_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/core.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/embeddings.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/merge.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/noise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/padding.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/pooling.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/wrappers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_layers.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/data/compiled_api.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/register.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/arithmetic.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/basic_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/control.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/convolution.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/creation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/dynamic.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/evaluation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/graph.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/image.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/logical.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/matrices.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/reduction.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/slice_join.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/spectral.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/transformation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_mapper.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/node_value_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops_for_converter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/arithmetic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/basic_math_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_utils.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_array.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_list.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/control_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/convolution_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/creation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/dynamic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/evaluation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/graph_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/image_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/logical_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/matrices_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/normalization_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/reduction_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/slice_join_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/spectral_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/transformation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/execution_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/model_analysis.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_model.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/text_line_dataset.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/csv_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/microphone_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/webcam_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasource.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/string_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/byte_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/file_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/url_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/source_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/file_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/url_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/readers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/cpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/backend_cpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/binary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/kernel_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Multiply.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/pool_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Clip.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropFilter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropInput.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fft_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FlipLeftRight.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IFFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsFinite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsInf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsNaN.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/LogicalNot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV5.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NotEqual.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/PadV2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SpaceToBatchND.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tanh.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/canvas_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tex_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flags_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/packing_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/glsl_version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/avg_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_complex_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/complex_abs_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_packed_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/crop_and_resize_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/cumsum_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/depth_to_space_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/diag_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fft_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fill_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_nd_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/im2col_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_grad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/max_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mulmat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/multinomial_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/onehot_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pool_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reduce_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reshape_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_packed_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/scatter_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/segment_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/select_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/strided_slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/texture_manager.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tile_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unpack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/backend_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/kernel_funcs_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flip_left_right_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FlipLeftRight.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reduce.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV3.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV5.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/rotate_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../src/version.ts", "node_modules/@tensorflow/tfjs/dist/../src/index.ts", "src/env/isNodejs.ts", "src/draw/drawContour.ts", "src/classes/Dimensions.ts", "src/utils/index.ts", "src/classes/Point.ts", "src/classes/Box.ts", "src/classes/BoundingBox.ts", "src/classes/ObjectDetection.ts", "src/classes/FaceDetection.ts", "src/ops/iou.ts", "src/ops/minBbox.ts", "src/ops/nonMaxSuppression.ts", "src/ops/normalize.ts", "src/ops/padToSquare.ts", 
"src/ops/shuffleArray.ts", "src/ops/index.ts", "src/classes/Rect.ts", "src/classes/FaceLandmarks.ts", "src/classes/FaceLandmarks5.ts", "src/classes/FaceLandmarks68.ts", "src/classes/FaceMatch.ts", "src/classes/LabeledBox.ts", "src/classes/LabeledFaceDescriptors.ts", "src/classes/PredictedBox.ts", "src/factories/WithFaceDetection.ts", "src/env/createBrowserEnv.ts", "src/env/createFileSystem.ts", "src/env/createNodejsEnv.ts", "src/env/isBrowser.ts", "src/env/index.ts", "src/dom/resolveInput.ts", "src/dom/getContext2dOrThrow.ts", "src/draw/DrawTextField.ts", "src/draw/DrawBox.ts", "src/draw/drawDetections.ts", "src/dom/isMediaLoaded.ts", "src/dom/awaitMediaLoaded.ts", "src/dom/bufferToImage.ts", "src/dom/getMediaDimensions.ts", "src/dom/createCanvas.ts", "src/dom/imageTensorToCanvas.ts", "src/dom/isMediaElement.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/backend.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/environment.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/global_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_names.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_registry.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/profiler.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tape.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_format.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/types.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/engine.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/device_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/flags.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util_env.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/operation.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/complex.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/buffer.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cast.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/clone.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/print.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/base_side_effects.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reshape.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mat_mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/transpose.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor3d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/browser.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/globals.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/add.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floorDiv.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/div.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/abs.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/axis_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv_util.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sigmoid.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/batch_to_space_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_to.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cos.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cosh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cumsum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/where.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros_like.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/expand_dims.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tile.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/eye.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/fill.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/segment_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/gather.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/imag.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/maximum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/scalar.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log1p.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/neg.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sub.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log_sum_exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_and.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_not.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ones.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mean.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/min.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/minimum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/square.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/not_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/real.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/space_to_batch_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pow.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor1d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/range.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/relu.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reverse.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/rsqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sin.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sinh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/fft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/ifft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/irfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/rfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squared_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squeeze.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/stack.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/step.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unsorted_segment_sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unstack.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/norm.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/signal_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hamming_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hann_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/frame.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/stft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/crop_and_resize.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/flip_left_right.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/rotate_with_offset.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/nonmax_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/array_util.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/non_max_suppression_impl.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_bilinear.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_nearest_neighbor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/band_part.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/gram_schmidt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/qr.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/loss_ops_utils.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/compute_weighted_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/absolute_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/cosine_distance.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/hinge_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/huber_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/log_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/mean_squared_error.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/sigmoid_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/softmax_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ops.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/selu_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Abs_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acos_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Add_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AddN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchMatMul_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchToSpaceND_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BroadcastTo_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cast_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Ceil_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ClipByValue_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Concat_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2DBackpropInput_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cos_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cumsum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/DepthwiseConv2dNative_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Dilation2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Div_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Elu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Erf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Exp_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Expm1_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Floor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FloorDiv_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FusedBatchNorm_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GatherV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GreaterEqual_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Identity_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsFinite_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsInf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsNan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log1p_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LogSoftmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/local_response_normalization_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LRN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/min_max_grad_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Max_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Maximum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_backprop.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Min_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Minimum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Mod_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Multiply_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Negate_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OneHot_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OnesLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/PadV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Pow_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Prelu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reciprocal_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu6_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reshape_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeBilinear_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeNearestNeighbor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reverse_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Round_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Rsqrt_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SelectV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Selu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sigmoid_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sign_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Slice_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softplus_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SpaceToBatchND_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SplitV_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sqrt_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Square_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SquaredDifference_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Step_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sub_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tile_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Transpose_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Unpack_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/UnsortedSegmentSum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ZerosLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/register_all_gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/index.ts", "src/dom/imageToSquare.ts", "src/dom/NetInput.ts", "src/dom/toNetInput.ts", "src/dom/extractFaces.ts", "src/dom/extractFaceTensors.ts", "src/dom/fetchOrThrow.ts", "src/dom/fetchImage.ts", "src/dom/fetchJson.ts", "src/dom/fetchNetWeights.ts", "src/common/getModelUris.ts", "src/dom/loadWeightMap.ts", "src/dom/matchDimensions.ts", "src/NeuralNetwork.ts", "src/common/depthwiseSeparableConv.ts", "src/faceFeatureExtractor/denseBlock.ts", "src/common/convLayer.ts", "src/common/disposeUnusedWeightTensors.ts", "src/common/extractConvParamsFactory.ts", "src/common/extractFCParamsFactory.ts", "src/common/types.ts", "src/common/extractSeparableConvParamsFactory.ts", "src/common/extractWeightEntryFactory.ts", "src/common/extractWeightsFactory.ts", "src/faceFeatureExtractor/extractorsFactory.ts", "src/faceFeatureExtractor/extractParams.ts", "src/common/loadConvParamsFactory.ts", "src/faceFeatureExtractor/loadParamsFactory.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMap.ts", "src/faceFeatureExtractor/FaceFeatureExtractor.ts", "src/common/fullyConnectedLayer.ts", "src/faceProcessor/extractParams.ts", "src/faceProcessor/extractParamsFromWeigthMap.ts", "src/faceProcessor/util.ts", "src/faceProcessor/FaceProcessor.ts", "src/faceExpressionNet/FaceExpressions.ts", "src/faceExpressionNet/FaceExpressionNet.ts", "src/factories/WithFaceExpressions.ts", "src/draw/drawFaceExpressions.ts", "src/factories/WithFaceLandmarks.ts", "src/draw/DrawFaceLandmarks.ts", "src/draw/index.ts", "src/xception/extractParams.ts", "src/xception/extractParamsFromWeigthMap.ts", "src/xception/TinyXception.ts", "src/ageGenderNet/extractParams.ts", "src/ageGenderNet/extractParamsFromWeigthMap.ts", "src/ageGenderNet/types.ts", "src/ageGenderNet/AgeGenderNet.ts", "src/faceLandmarkNet/FaceLandmark68NetBase.ts", "src/faceLandmarkNet/FaceLandmark68Net.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMapTiny.ts", "src/faceFeatureExtractor/extractParamsTiny.ts", "src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "src/faceLandmarkNet/index.ts", "src/faceRecognitionNet/scaleLayer.ts", "src/faceRecognitionNet/convLayer.ts", "src/faceRecognitionNet/extractParams.ts", "src/faceRecognitionNet/extractParamsFromWeigthMap.ts", "src/faceRecognitionNet/residualLayer.ts", "src/faceRecognitionNet/FaceRecognitionNet.ts", "src/faceRecognitionNet/index.ts", "src/factories/WithFaceDescriptor.ts", "src/factories/WithAge.ts", "src/factories/WithGender.ts", "src/ssdMobilenetv1/extractParams.ts", "src/ssdMobilenetv1/extractParamsFromWeigthMap.ts", "src/ssdMobilenetv1/pointwiseConvLayer.ts", "src/ssdMobilenetv1/mobileNetV1.ts", "src/ssdMobilenetv1/nonMaxSuppression.ts", "src/ssdMobilenetv1/outputLayer.ts", "src/ssdMobilenetv1/boxPredictionLayer.ts", "src/ssdMobilenetv1/predictionLayer.ts", "src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "src/ssdMobilenetv1/SsdMobilenetv1.ts", "src/ssdMobilenetv1/index.ts", "src/tinyYolov2/const.ts", "src/tinyYolov2/config.ts", "src/tinyYolov2/leaky.ts", 
"src/tinyYolov2/convWithBatchNorm.ts", "src/tinyYolov2/depthwiseSeparableConv.ts", "src/tinyYolov2/extractParams.ts", "src/tinyYolov2/extractParamsFromWeigthMap.ts", "src/tinyYolov2/TinyYolov2Options.ts", "src/tinyYolov2/TinyYolov2Base.ts", "src/tinyYolov2/TinyYolov2.ts", "src/tinyYolov2/index.ts", "src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "src/globalApi/ComposableTask.ts", "src/globalApi/extractFacesAndComputeResults.ts", "src/tinyFaceDetector/const.ts", "src/tinyFaceDetector/TinyFaceDetector.ts", "src/globalApi/nets.ts", "src/globalApi/PredictFaceExpressionsTask.ts", "src/globalApi/PredictAgeAndGenderTask.ts", "src/globalApi/ComputeFaceDescriptorsTasks.ts", "src/globalApi/DetectFaceLandmarksTasks.ts", "src/globalApi/DetectFacesTasks.ts", "src/globalApi/detectFaces.ts", "src/globalApi/allFaces.ts", "src/euclideanDistance.ts", "src/globalApi/FaceMatcher.ts", "src/tinyFaceDetector/index.ts", "src/resizeResults.ts", "src/index.ts"], - "sourcesContent": ["\"use strict\";\n\n// ref: https://github.com/tc39/proposal-global\nvar getGlobal = function () {\n\t// the only reliable means to get the global object is\n\t// `Function('return this')()`\n\t// However, this causes CSP violations in Chrome apps.\n\tif (typeof self !== 'undefined') { return self; }\n\tif (typeof window !== 'undefined') { return window; }\n\tif (typeof global !== 'undefined') { return global; }\n\tthrow new Error('unable to locate global object');\n}\n\nvar global = getGlobal();\n\nmodule.exports = exports = global.fetch;\n\n// Needed for TypeScript and Webpack.\nif (global.fetch) {\n\texports.default = global.fetch.bind(global);\n}\n\nexports.Headers = global.Headers;\nexports.Request = global.Request;\nexports.Response = global.Response;", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n/** Convenient class for storing tensor-related data. */\nexport class DataStorage {\n constructor(backend, dataMover) {\n this.backend = backend;\n this.dataMover = dataMover;\n this.data = new WeakMap();\n this.dataIdsCount = 0;\n }\n get(dataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n set(dataId, value) {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n has(dataId) {\n return this.data.has(dataId);\n }\n delete(dataId) {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n numDataIds() {\n return this.dataIdsCount;\n }\n}\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. 
New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend {\n time(f) {\n return notYetImplemented('time');\n }\n read(dataId) {\n return notYetImplemented('read');\n }\n readSync(dataId) {\n return notYetImplemented('readSync');\n }\n numDataIds() {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId) {\n return notYetImplemented('disposeData');\n }\n write(values, shape, dtype) {\n return notYetImplemented('write');\n }\n move(dataId, values, shape, dtype) {\n return notYetImplemented('move');\n }\n memory() {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision() {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon() {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n batchMatMul(a, b, transposeA, transposeB) {\n return notYetImplemented('batchMatMul');\n }\n fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedBatchMatMul');\n }\n slice(x, begin, size) {\n return notYetImplemented('slice');\n }\n stridedSlice(x, begin, end, strides) {\n return notYetImplemented('stridedSlice');\n }\n unstack(x, axis) {\n return notYetImplemented('unstack');\n }\n reverse(a, axis) {\n return notYetImplemented('reverse');\n }\n concat(tensors, axis) {\n return notYetImplemented('concat');\n }\n neg(a) {\n return notYetImplemented('neg');\n }\n add(a, b) {\n return notYetImplemented('add');\n }\n addN(tensors) {\n return notYetImplemented('addN');\n }\n subtract(a, b) {\n return notYetImplemented('subtract');\n }\n multiply(a, b) {\n return notYetImplemented('multiply');\n }\n realDivide(a, b) {\n return notYetImplemented('realDivide');\n }\n floorDiv(a, b) {\n return notYetImplemented('floorDiv');\n }\n sum(x, axes) {\n return notYetImplemented('sum');\n }\n prod(x, axes) {\n return notYetImplemented('prod');\n }\n unsortedSegmentSum(x, segmentIds, numSegments) {\n return notYetImplemented('unsortedSegmentSum');\n }\n argMin(x, axis) {\n return notYetImplemented('argMin');\n }\n argMax(x, axis) {\n return notYetImplemented('argMax');\n }\n equal(a, b) {\n return notYetImplemented('equal');\n }\n notEqual(a, b) {\n return notYetImplemented('notEqual');\n }\n less(a, b) {\n return notYetImplemented('less');\n }\n lessEqual(a, b) {\n return notYetImplemented('lessEqual');\n }\n greater(a, b) {\n return notYetImplemented('greater');\n }\n greaterEqual(a, b) {\n return notYetImplemented('greaterEqual');\n }\n logicalNot(a) {\n return notYetImplemented('logicalNot');\n }\n logicalAnd(a, b) {\n return notYetImplemented('logicalAnd');\n }\n logicalOr(a, b) {\n return notYetImplemented('logicalOr');\n }\n where(condition) {\n return notYetImplemented('where');\n }\n select(condition, a, b) {\n return notYetImplemented('select');\n }\n topk(x, k, sorted) {\n return notYetImplemented('topk');\n }\n min(x, axes) {\n return notYetImplemented('min');\n }\n minimum(a, b) {\n return notYetImplemented('minimum');\n }\n mod(a, b) {\n return notYetImplemented('mod');\n }\n max(x, axes) {\n return notYetImplemented('max');\n }\n maximum(a, b) {\n return notYetImplemented('maximum');\n }\n all(x, axes) {\n return notYetImplemented('all');\n }\n any(x, axes) {\n return notYetImplemented('any');\n }\n squaredDifference(a, b) {\n return 
notYetImplemented('squaredDifference');\n }\n ceil(x) {\n return notYetImplemented('ceil');\n }\n floor(x) {\n return notYetImplemented('floor');\n }\n round(x) {\n return notYetImplemented('round');\n }\n sign(x) {\n return notYetImplemented('sign');\n }\n isNaN(x) {\n return notYetImplemented('isNaN');\n }\n isInf(x) {\n return notYetImplemented('isInf');\n }\n isFinite(x) {\n return notYetImplemented('isFinite');\n }\n pow(a, b) {\n return notYetImplemented('pow');\n }\n exp(x) {\n return notYetImplemented('exp');\n }\n expm1(x) {\n return notYetImplemented('expm1');\n }\n softmax(x, dim) {\n return notYetImplemented('softmax');\n }\n log(x) {\n return notYetImplemented('log');\n }\n log1p(x) {\n return notYetImplemented('log1p');\n }\n sqrt(x) {\n return notYetImplemented('sqrt');\n }\n rsqrt(x) {\n return notYetImplemented('rsqrt');\n }\n square(x) {\n return notYetImplemented('square');\n }\n reciprocal(x) {\n return notYetImplemented('reciprocal');\n }\n relu(x) {\n return notYetImplemented('relu');\n }\n relu6(x) {\n return notYetImplemented('relu6');\n }\n prelu(x, a) {\n return notYetImplemented('prelu');\n }\n elu(x) {\n return notYetImplemented('elu');\n }\n eluDer(dy, y) {\n return notYetImplemented('eluDer');\n }\n selu(x) {\n return notYetImplemented('selu');\n }\n int(x) {\n return notYetImplemented('int');\n }\n clip(x, min, max) {\n return notYetImplemented('clip');\n }\n abs(x) {\n return notYetImplemented('abs');\n }\n complexAbs(x) {\n return notYetImplemented('complexAbs');\n }\n sigmoid(x) {\n return notYetImplemented('sigmoid');\n }\n softplus(x) {\n return notYetImplemented('softplus');\n }\n sin(x) {\n return notYetImplemented('sin');\n }\n cos(x) {\n return notYetImplemented('cos');\n }\n tan(x) {\n return notYetImplemented('tan');\n }\n asin(x) {\n return notYetImplemented('asin');\n }\n acos(x) {\n return notYetImplemented('acos');\n }\n atan(x) {\n return notYetImplemented('atan');\n }\n atan2(a, b) {\n return notYetImplemented('atan2');\n }\n sinh(x) {\n return notYetImplemented('sinh');\n }\n cosh(x) {\n return notYetImplemented('cosh');\n }\n tanh(x) {\n return notYetImplemented('tanh');\n }\n asinh(x) {\n return notYetImplemented('asinh');\n }\n acosh(x) {\n return notYetImplemented('acosh');\n }\n atanh(x) {\n return notYetImplemented('atanh');\n }\n erf(x) {\n return notYetImplemented('erf');\n }\n step(x, alpha) {\n return notYetImplemented('step');\n }\n fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedConv2d');\n }\n conv2d(x, filter, convInfo) {\n return notYetImplemented('conv2d');\n }\n conv2dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv2dDerInput');\n }\n conv2dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv2dDerFilter');\n }\n fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedDepthwiseConv2D');\n }\n depthwiseConv2D(input, filter, convInfo) {\n return notYetImplemented('depthwiseConv2D');\n }\n depthwiseConv2DDerInput(dy, filter, convInfo) {\n return notYetImplemented('depthwiseConv2DDerInput');\n }\n depthwiseConv2DDerFilter(x, dY, convInfo) {\n return notYetImplemented('depthwiseConv2DDerFilter');\n }\n conv3d(x, filter, convInfo) {\n return notYetImplemented('conv3d');\n }\n conv3dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv3dDerInput');\n }\n conv3dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv3dDerFilter');\n }\n maxPool(x, 
convInfo) {\n return notYetImplemented('maxPool');\n }\n maxPoolBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPoolBackprop');\n }\n avgPool(x, convInfo) {\n return notYetImplemented('avgPool');\n }\n avgPoolBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPoolBackprop');\n }\n avgPool3d(x, convInfo) {\n return notYetImplemented('avgPool3d');\n }\n avgPool3dBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPool3dBackprop');\n }\n maxPool3d(x, convInfo) {\n return notYetImplemented('maxPool3d');\n }\n maxPool3dBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPool3dBackprop');\n }\n reshape(x, shape) {\n return notYetImplemented('reshape');\n }\n cast(x, dtype) {\n return notYetImplemented('cast');\n }\n tile(x, reps) {\n return notYetImplemented('tile');\n }\n pad(x, paddings, constantValue) {\n return notYetImplemented('pad');\n }\n transpose(x, perm) {\n return notYetImplemented('transpose');\n }\n gather(x, indices, axis) {\n return notYetImplemented('gather');\n }\n gatherND(x, indices) {\n return notYetImplemented('gatherND');\n }\n scatterND(indices, updates, shape) {\n return notYetImplemented('scatterND');\n }\n batchToSpaceND(x, blockShape, crops) {\n return notYetImplemented('batchToSpaceND');\n }\n spaceToBatchND(x, blockShape, paddings) {\n return notYetImplemented('spaceToBatchND');\n }\n resizeBilinear(x, newHeight, newWidth, alignCorners) {\n return notYetImplemented('resizeBilinear');\n }\n resizeBilinearBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeBilinearBackprop');\n }\n resizeNearestNeighbor(x, newHEight, newWidth, alignCorners) {\n return notYetImplemented('resizeNearestNeighbor');\n }\n resizeNearestNeighborBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeNearestNeighborBackprop');\n }\n batchNorm(x, mean, variance, offset, scale, varianceEpsilon) {\n return notYetImplemented('batchNorm');\n }\n localResponseNormalization4D(x, radius, bias, alpha, beta) {\n return notYetImplemented('localResponseNormalization4D');\n }\n LRNGrad(dy, inputImage, outputImage, radius, bias, alpha, beta) {\n return notYetImplemented('LRNGrad');\n }\n multinomial(logits, normalized, numSamples, seed) {\n return notYetImplemented('multinomial');\n }\n oneHot(indices, depth, onValue, offValue) {\n return notYetImplemented('oneHot');\n }\n cumsum(x, axis, exclusive, reverse) {\n return notYetImplemented('cumsum');\n }\n nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {\n return notYetImplemented('nonMaxSuppression');\n }\n fft(x) {\n return notYetImplemented('fft');\n }\n ifft(x) {\n return notYetImplemented('ifft');\n }\n complex(real, imag) {\n return notYetImplemented('complex');\n }\n real(input) {\n return notYetImplemented('real');\n }\n imag(input) {\n return notYetImplemented('imag');\n }\n cropAndResize(image, boxes, boxIndex, cropSize, method, extrapolationValue) {\n return notYetImplemented('cropAndResize');\n }\n depthToSpace(x, blockSize, dataFormat) {\n return notYetImplemented('depthToSpace');\n }\n // Aligns with the \"SplitV\" kernel in TensorFlow.\n split(value, sizeSplits, axis) {\n return notYetImplemented('split');\n }\n sparseToDense(sparseIndices, sparseValues, outputShape, defaultValue) {\n return notYetImplemented('sparseToDense');\n }\n diag(x) {\n return notYetImplemented('diag');\n }\n fill(shape, value, dtype) {\n return notYetImplemented('fill');\n }\n onesLike(x) {\n return notYetImplemented('onesLike');\n }\n zerosLike(x) {\n return 
notYetImplemented('zerosLike');\n }\n linspace(start, stop, num) {\n return notYetImplemented('linspace');\n }\n dispose() {\n return notYetImplemented('dispose');\n }\n}\nfunction notYetImplemented(kernelName) {\n throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n//# sourceMappingURL=backend.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n // tslint:disable-next-line: no-any\n constructor(global) {\n this.global = global;\n this.flags = {};\n this.flagRegistry = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n setPlatform(platformName, platform) {\n if (this.platform != null) {\n console.warn(`Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n registerFlag(flagName, evaluationFn, setHook) {\n this.flagRegistry[flagName] = { evaluationFn, setHook };\n // Override the flag value from the URL. This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n async getAsync(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n get(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n const flagValue = this.evaluateFlag(flagName);\n if (flagValue instanceof Promise) {\n throw new Error(`Flag ${flagName} cannot be synchronously evaluated. 
` +\n `Please use getAsync() instead.`);\n }\n this.flags[flagName] = flagValue;\n return this.flags[flagName];\n }\n getNumber(flagName) {\n return this.get(flagName);\n }\n getBool(flagName) {\n return this.get(flagName);\n }\n getFlags() {\n return this.flags;\n }\n // For backwards compatibility.\n get features() {\n return this.flags;\n }\n set(flagName, value) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n evaluateFlag(flagName) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n setFlags(flags) {\n this.flags = Object.assign({}, flags);\n }\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n populateURLFlags() {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n const urlParams = getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':');\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\nexport function getQueryParams(queryString) {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\nfunction decodeParam(params, name, value) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\nfunction parseValue(flagName, value) {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n }\n else if (`${+value}` === value) {\n return +value;\n }\n throw new Error(`Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\nexport let ENV = null;\nexport function setEnvironmentGlobal(environment) {\n ENV = environment;\n}\n//# sourceMappingURL=environment.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace;\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace() {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns;\n if (typeof (window) !== 'undefined') {\n ns = window;\n }\n else if (typeof (global) !== 'undefined') {\n ns = global;\n }\n else if (typeof (process) !== 'undefined') {\n ns = process;\n }\n else if (typeof (self) !== 'undefined') {\n ns = self;\n }\n else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n// tslint:disable-next-line:no-any\nfunction getGlobalMap() {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key, init) {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n }\n else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n//# sourceMappingURL=global_util.js.map", "export const Abs = 'Abs';\nexport const Acos = 'Acos';\nexport const Acosh = 'Acosh';\nexport const Add = 'Add';\nexport const AddN = 'AddN';\nexport const All = 'All';\nexport const Any = 'Any';\nexport const ArgMax = 'ArgMax';\nexport const ArgMin = 'ArgMin';\nexport const Asin = 'Asin';\nexport const Asinh = 'Asinh';\nexport const Atan = 'Atan';\nexport const Atanh = 'Atanh';\nexport const Atan2 = 'Atan2';\nexport const AvgPool = 'AvgPool';\nexport const AvgPoolBackprop = 'AvgPoolBackprop';\nexport const AvgPool3D = 'AvgPool3D';\nexport const AvgPool3DBackprop = 'AvgPool3DBackprop';\nexport const BatchMatMul = 'BatchMatMul';\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport const BroadcastTo = 'BroadcastTo';\nexport const Cast = 'Cast';\nexport const Ceil = 'Ceil';\nexport const ClipByValue = 'ClipByValue';\nexport const Complex = 'Complex';\nexport const Concat = 'Concat';\nexport const Conv2D = 'Conv2D';\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport const Conv3D = 'Conv3D';\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport const Cos = 'Cos';\nexport const Cosh = 'Cosh';\nexport const Cumsum = 'Cumsum';\nexport const CropAndResize = 'CropAndResize';\nexport const DepthToSpace = 'DepthToSpace';\nexport const DepthwiseConv2dNative = 
'DepthwiseConv2dNative';\nexport const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';\nexport const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';\nexport const Diag = 'Diag';\nexport const Dilation2D = 'Dilation2D';\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport const Div = 'Div';\nexport const Elu = 'Elu';\nexport const EluGrad = 'EluGrad';\nexport const Erf = 'Erf';\nexport const Equal = 'Equal';\nexport const Exp = 'Exp';\nexport const Expm1 = 'Expm1';\nexport const FFT = 'FFT';\nexport const Fill = 'Fill';\nexport const FlipLeftRight = 'FlipLeftRight';\nexport const Floor = 'Floor';\nexport const FloorDiv = 'FloorDiv';\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport const GatherV2 = 'GatherV2';\nexport const GatherNd = 'GatherNd';\nexport const Greater = 'Greater';\nexport const GreaterEqual = 'GreaterEqual';\nexport const Identity = 'Identity';\nexport const IFFT = 'IFFT';\nexport const Imag = 'Imag';\nexport const IsFinite = 'IsFinite';\nexport const IsInf = 'IsInf';\nexport const IsNan = 'IsNan';\nexport const Less = 'Less';\nexport const LessEqual = 'LessEqual';\nexport const LinSpace = 'LinSpace';\nexport const Log = 'Log';\nexport const Log1p = 'Log1p';\nexport const LogicalAnd = 'LogicalAnd';\nexport const LogicalNot = 'LogicalNot';\nexport const LogicalOr = 'LogicalOr';\nexport const LogSoftmax = 'LogSoftmax';\nexport const LRN = 'LRN';\nexport const LRNBackprop = 'LRNBackprop';\nexport const Max = 'Max';\nexport const Maximum = 'Maximum';\nexport const MaxPool = 'MaxPool';\nexport const MaxPoolBackprop = 'MaxPoolBackprop';\nexport const MaxPool3D = 'MaxPool3D';\nexport const MaxPool3DBackprop = 'MaxPool3DBackprop';\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport const Mean = 'Mean';\nexport const Min = 'Min';\nexport const Minimum = 'Minimum';\nexport const Mod = 'Mod';\nexport const Multiply = 'Multiply';\nexport const Negate = 'Negate';\nexport const NotEqual = 'NotEqual';\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport const OnesLike = 'OnesLike';\nexport const OneHot = 'OneHot';\nexport const PadV2 = 'PadV2';\nexport const Pool = 'Pool';\nexport const Pow = 'Pow';\nexport const Prelu = 'Prelu';\nexport const Prod = 'Prod';\nexport const Range = 'Range';\nexport const Real = 'Real';\nexport const Reciprocal = 'Reciprocal';\nexport const Relu = 'Relu';\nexport const Reshape = 'Reshape';\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport const ResizeBilinear = 'ResizeBilinear';\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport const Relu6 = 'Relu6';\nexport const Reverse = 'Reverse';\nexport const Round = 'Round';\nexport const Rsqrt = 'Rsqrt';\nexport const ScatterNd = 'ScatterNd';\nexport const SelectV2 = 'SelectV2';\nexport const Selu = 'Selu';\nexport const Slice = 'Slice';\nexport const Sin = 'Sin';\nexport const Sinh = 'Sinh';\nexport const Sign = 'Sign';\nexport const Sigmoid = 'Sigmoid';\nexport const Softplus = 'Softplus';\nexport const Sqrt = 'Sqrt';\nexport const Sum = 'Sum';\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport const SplitV = 'SplitV';\nexport const Softmax = 'Softmax';\nexport const SquaredDifference = 
'SquaredDifference';\nexport const Square = 'Square';\nexport const Sub = 'Sub';\nexport const SparseToDense = 'SparseToDense';\nexport const StridedSlice = 'StridedSlice';\nexport const Tan = 'Tan';\nexport const Tanh = 'Tanh';\nexport const Tile = 'Tile';\nexport const TopK = 'TopK';\nexport const Transpose = 'Transpose';\nexport const Unique = 'Unique';\nexport const Unpack = 'Unpack';\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport const ZerosLike = 'ZerosLike';\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport const FromPixels = 'FromPixels';\nexport const RotateWithOffset = 'RotateWithOffset';\nexport const _FusedMatMul = '_FusedMatMul';\nexport const FusedConv2D = 'FusedConv2D';\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\n//# sourceMappingURL=kernel_names.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport { getGlobal } from './global_util';\nconst kernelRegistry = getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry = getGlobal('gradRegistry', () => new Map());\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName) {\n return gradRegistry.get(kernelName);\n}\nexport function getKernelsForBackend(backendName) {\n const it = kernelRegistry.entries();\n const result = [];\n while (true) {\n const { done, value } = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend,] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. 
Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config) {\n const { kernelName, backendName } = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n console.warn(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config) {\n const { kernelName } = config;\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n console.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n/** Removes the registered gradient from the global registry. */\nexport function unregisterGradient(kernelName) {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(`The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(registeredBackendName, newBackendName) {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName });\n registerKernel(newKernelConfig);\n });\n}\nfunction makeKey(kernelName, backendName) {\n return `${backendName}_${kernelName}`;\n}\n//# sourceMappingURL=kernel_registry.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array) {\n let counter = array.length;\n let temp = 0;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n temp = array[counter];\n array[counter] = array[index];\n array[index] = temp;\n }\n}\n/** Clamps a value to a specified range. */\nexport function clamp(min, x, max) {\n return Math.max(min, Math.min(x, max));\n}\nexport function nearestLargerEven(val) {\n return val % 2 === 0 ? val : val + 1;\n}\nexport function sum(arr) {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a, b) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a, b) {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr, msg) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? 
msg : msg());\n }\n}\nexport function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {\n assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\nexport function assertNonNull(a) {\n assert(a != null, () => `The input to the tensor constructor must be a non-null value.`);\n}\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function flatten(arr, result = [], skipTypedArray = false) {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n }\n else {\n result.push(arr);\n }\n return result;\n}\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape) {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\nexport function isScalarShape(shape) {\n return shape.length === 0;\n}\nexport function arraysEqual(n1, n2) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\nexport function isInt(a) {\n return a % 1 === 0;\n}\nexport function tanh(x) {\n // tslint:disable-next-line:no-any\n if (Math.tanh != null) {\n // tslint:disable-next-line:no-any\n return Math.tanh(x);\n }\n if (x === Infinity) {\n return 1;\n }\n else if (x === -Infinity) {\n return -1;\n }\n else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\nexport function sizeToSquarishShape(size) {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n) {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\nexport function rightPad(a, size) {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\nexport function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n tryCount++;\n const nextBackoff = 
delayFn(tryCount);\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n tryFn();\n });\n}\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(shape, size) {\n let shapeProd = 1;\n let implicitIdx = -1;\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n }\n else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(`Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n }\n else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n if (shapeProd === 0) {\n throw Error(`Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(`The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\nexport function parseAxisParam(axis, shape) {\n const rank = shape.length;\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n // Check for valid range\n assert(axis.every(ax => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n // Check for only integers\n assert(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape, axis) {\n const newShape = [];\n const keptDims = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return { newShape, keptDims };\n}\nexport function getTypedArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function getArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else if (dtype === 'string') {\n values = new Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function checkConversionForErrors(vals, dtype) {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype) {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType, newType) {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\nexport function isTypedArray(a) {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\nexport function bytesPerElement(dtype) {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n }\n else if (dtype === 'complex64') {\n return 8;\n }\n else if (dtype === 'bool') {\n return 1;\n }\n else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr) {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n/** Returns true if the value is a string. 
*/\nexport function isString(value) {\n return typeof value === 'string' || value instanceof String;\n}\nexport function isBoolean(value) {\n return typeof value === 'boolean';\n}\nexport function isNumber(value) {\n return typeof value === 'number';\n}\nexport function inferDtype(values) {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n }\n else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n }\n else if (isNumber(values)) {\n return 'float32';\n }\n else if (isString(values)) {\n return 'string';\n }\n else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\nexport function isFunction(f) {\n return !!(f && f.constructor && f.call && f.apply);\n}\nexport function nearestDivisor(size, start) {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\nexport function computeStrides(shape) {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\n/**\n * Create typed array for scalar value. Used for storing in `DataStorage`.\n */\nexport function createScalarValue(value, dtype) {\n if (dtype === 'string') {\n return encodeString(value);\n }\n return toTypedArray([value], dtype);\n}\nexport function toTypedArray(a, dtype) {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = flatten(a);\n }\n if (env().getBool('DEBUG')) {\n checkConversionForErrors(a, dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a);\n }\n else if (dtype === 'int32') {\n return new Int32Array(a);\n }\n else if (dtype === 'bool') {\n const bool = new Uint8Array(a.length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round(a[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\nfunction createNestedArray(offset, shape, a) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0];\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n }\n else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a);\n }\n }\n return ret;\n}\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(shape, a) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}.`);\n }\n return createNestedArray(0, shape, a);\n}\nfunction noConversionNeeded(a, dtype) {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\nexport function makeOnesTypedArray(size, dtype) {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < 
array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\nexport function makeZerosTypedArray(size, dtype) {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size);\n }\n else if (dtype === 'int32') {\n return new Int32Array(size);\n }\n else if (dtype === 'bool') {\n return new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(shape, dtype) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n }\n else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n }\n else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now() {\n return env().platform.now();\n}\nexport function assertNonNegativeIntegerDimensions(shape) {\n shape.forEach(dimSize => {\n assert(Number.isInteger(dimSize) && dimSize >= 0, () => `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(path, requestInits) {\n return env().platform.fetch(path, requestInits);\n}\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. 
Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(locs, rank, strides) {\n if (rank === 0) {\n return 0;\n }\n else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(index, rank, strides) {\n if (rank === 0) {\n return [];\n }\n else if (rank === 1) {\n return [index];\n }\n const locs = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n//# sourceMappingURL=util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\nexport class Profiler {\n constructor(backendTimer, logger) {\n this.backendTimer = backendTimer;\n this.logger = logger;\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n profileKernel(kernelName, inputs, f) {\n let outputs;\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n const timer = this.backendTimer.time(holdResultWrapperFn);\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n logKernelProfile(kernelProfile) {\n const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile;\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);\n });\n });\n }\n}\nexport function checkComputationForErrors(vals, dtype, kernelName) {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n 
const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\nexport class Logger {\n logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n console.log(`%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${inputShapesDescription}\\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');\n }\n}\n//# sourceMappingURL=profiler.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(tape, xs, y) {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX = {};\n const nodesFromX = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n if (anyInputFromX) {\n break;\n }\n }\n }\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY = {};\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n 
tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n // Return the paths that come from x and lead to y.\n const filteredTape = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n prunedNode.outputs = node.outputs;\n filteredTape.push(prunedNode);\n }\n }\n return filteredTape;\n}\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n const dys = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n }\n else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n if (node.gradient == null) {\n throw new Error(`Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(`Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n }\n else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n//# sourceMappingURL=tape.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { computeStrides, isString, rightPad, sizeFromShape } from './util';\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\nexport function tensorToString(vals, shape, dtype, verbose) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\nfunction computeMaxSizePerColumn(vals, shape, dtype, strides) {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\nfunction valToString(val, pad, dtype) {\n let valStr;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n }\n else if (isString(val)) {\n valStr = `'${val}'`;\n }\n else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n }\n else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n return rightPad(valStr, pad);\n}\nfunction boolNumToString(v) {\n return v === 0 ? 'false' : 'true';\n}\nfunction subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0])];\n }\n return [vals[0].toString()];\n }\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n let firstVals = Array.from(vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals = dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\nfunction createComplexTuples(vals) {\n const complexTuples = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]]);\n }\n return complexTuples;\n}\n//# sourceMappingURL=tensor_format.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { tensorToString } from './tensor_format';\nimport * as util from './util';\nimport { computeStrides, toNestedArray } from './util';\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n constructor(shape, dtype, values) {\n this.dtype = dtype;\n this.shape = shape.slice();\n this.size = util.sizeFromShape(shape);\n if (values != null) {\n const n = values.length;\n util.assert(n === this.size, () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value, ...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n const index = this.locToIndex(locs);\n this.values[index] = value;\n }\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index];\n }\n locToIndex(locs) {\n if (this.rank === 0) {\n return 0;\n }\n else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n indexToLoc(index) {\n if (this.rank === 0) {\n return [];\n }\n else if (this.rank === 1) {\n return [index];\n }\n const locs = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor() {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype);\n }\n}\n// For tracking tensor creation and disposal.\nlet trackerFn = null;\n// Used by chaining methods to call into ops.\nlet opHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn) {\n trackerFn = fn;\n}\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler) {\n opHandler = handler;\n}\n/**\n * Sets the deprecation warning function to be used by this file. This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn) {\n deprecationWarningFn = fn;\n}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n constructor(shape, dtype, dataId, id) {\n /** Whether this tensor has been globally kept. */\n this.kept = false;\n this.isDisposedInternal = false;\n this.shape = shape.slice();\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer() {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype, vals);\n }\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync() {\n return opHandler.buffer(this.shape, this.dtype, this.dataSync());\n }\n /**\n * Returns the tensor data as a nested array. 
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array() {\n const vals = await this.data();\n return toNestedArray(this.shape, vals);\n }\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync() {\n return toNestedArray(this.shape, this.dataSync());\n }\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data() {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data;\n try {\n return bytes.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync() {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return data.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /** Returns the underlying bytes of the tensor's data. */\n async bytes() {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data;\n }\n else {\n return new Uint8Array(data.buffer);\n }\n }\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose() {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n get isDisposed() {\n return this.isDisposedInternal;\n }\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false) {\n return opHandler.print(this, verbose);\n }\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone() {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n /**\n * Returns a human-readable description of the tensor. 
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false) {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n cast(dtype) {\n this.throwIfDisposed();\n return opHandler.cast(this, dtype);\n }\n variable(trainable = true, name, dtype) {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype);\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n constructor(initialValue, trainable, name, tensorId) {\n super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.trainable = trainable;\n this.name = name;\n }\n /**\n * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue) {\n if (newValue.dtype !== this.dtype) {\n throw new Error(`dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(`shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n dispose() {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport var Rank;\n(function (Rank) {\n Rank[\"R0\"] = \"R0\";\n Rank[\"R1\"] = \"R1\";\n Rank[\"R2\"] = \"R2\";\n Rank[\"R3\"] = \"R3\";\n Rank[\"R4\"] = \"R4\";\n Rank[\"R5\"] = \"R5\";\n Rank[\"R6\"] = \"R6\";\n})(Rank || (Rank = {}));\n// Looks for upcasting types. 
Used, for example, in operations with mixed dtype\n// inputs.\nvar UpcastInt32AndMap;\n(function (UpcastInt32AndMap) {\n UpcastInt32AndMap[\"float32\"] = \"float32\";\n UpcastInt32AndMap[\"int32\"] = \"int32\";\n UpcastInt32AndMap[\"bool\"] = \"int32\";\n UpcastInt32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastInt32AndMap || (UpcastInt32AndMap = {}));\nvar UpcastBoolAndMap;\n(function (UpcastBoolAndMap) {\n UpcastBoolAndMap[\"float32\"] = \"float32\";\n UpcastBoolAndMap[\"int32\"] = \"int32\";\n UpcastBoolAndMap[\"bool\"] = \"bool\";\n UpcastBoolAndMap[\"complex64\"] = \"complex64\";\n})(UpcastBoolAndMap || (UpcastBoolAndMap = {}));\nvar UpcastFloat32AndMap;\n(function (UpcastFloat32AndMap) {\n UpcastFloat32AndMap[\"float32\"] = \"float32\";\n UpcastFloat32AndMap[\"int32\"] = \"float32\";\n UpcastFloat32AndMap[\"bool\"] = \"float32\";\n UpcastFloat32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));\nvar UpcastComplex64AndMap;\n(function (UpcastComplex64AndMap) {\n UpcastComplex64AndMap[\"float32\"] = \"complex64\";\n UpcastComplex64AndMap[\"int32\"] = \"complex64\";\n UpcastComplex64AndMap[\"bool\"] = \"complex64\";\n UpcastComplex64AndMap[\"complex64\"] = \"complex64\";\n})(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\nexport function upcastType(typeA, typeB) {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n/** Returns the output type after summation. */\nexport function sumOutType(type) {\n return upcastType(type, 'int32');\n}\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { Tensor } from './tensor';\nimport { upcastType } from './types';\nimport { assert } from './util';\nexport function makeTypesMatch(a, b) {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\nexport function assertTypesMatch(a, b) {\n assert(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\nexport function isTensorInList(tensor, tensorList) {\n return tensorList.some(x => x.id === tensor.id);\n}\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. 
If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result) {\n const list = [];\n const seen = new Set();\n walkTensorContainer(result, list, seen);\n return list;\n}\nfunction walkTensorContainer(container, list, seen) {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n// tslint:disable-next-line:no-any\nfunction isIterable(obj) {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n//# sourceMappingURL=tensor_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { KernelBackend } from './backends/backend';\nimport { Environment, setEnvironmentGlobal } from './environment';\nimport { getGlobalNamespace } from './global_util';\nimport { Add, Cast } from './kernel_names';\nimport { getGradient, getKernel, getKernelsForBackend } from './kernel_registry';\nimport { Profiler } from './profiler';\nimport { backpropagateGradients, getFilteredNodesXToY } from './tape';\nimport { setTensorTracker, Tensor, Variable } from './tensor';\nimport { getTensorsInContainer } from './tensor_util';\nimport * as util from './util';\nimport { bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape } from './util';\nclass EngineState {\n constructor() {\n // Public since optimizers will use it.\n this.registeredVariables = {};\n this.nextTapeNodeId = 0;\n this.numBytes = 0;\n this.numTensors = 0;\n this.numStringTensors = 0;\n this.numDataBuffers = 0;\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n this.gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n this.kernelDepth = 0;\n this.scopeStack = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. 
We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n this.numDataMovesStack = [];\n this.nextScopeId = 0;\n this.tensorInfo = new WeakMap();\n this.profiling = false;\n this.activeProfile = { newBytes: 0, newTensors: 0, peakBytes: 0, kernels: [], result: null };\n }\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\nexport class Engine {\n constructor(ENV) {\n this.ENV = ENV;\n this.registry = {};\n this.registryFactory = {};\n this.pendingBackendInitId = 0;\n this.state = new EngineState();\n }\n async ready() {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => { });\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n get backend() {\n if (this.pendingBackendInit != null) {\n throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const { name, asyncInit } = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(`The highest priority backend '${name}' has not yet been ` +\n `initialized. Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n backendNames() {\n return Object.keys(this.registryFactory);\n }\n findBackend(backendName) {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const { asyncInit } = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n }\n else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n findBackendFactory(backendName) {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n registerBackend(backendName, factory, priority = 1) {\n if (backendName in this.registryFactory) {\n console.warn(`${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = { factory, priority };\n return true;\n }\n async setBackend(backendName) {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const { success, asyncInit } = this.initializeBackend(backendName);\n const result = asyncInit ? 
await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n return true;\n }\n setupRegisteredKernels() {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n disposeRegisteredKernels(backendName) {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n initializeBackend(backendName) {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(`Cannot initialize backend ${backendName}, no registration found.`);\n }\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend)\n && typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success = backend\n .then(backendInstance => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return { success, asyncInit: true };\n }\n else {\n this.registry[backendName] = backend;\n return { success: true, asyncInit: false };\n }\n }\n catch (err) {\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return { success: false, asyncInit: false };\n }\n }\n removeBackend(backendName) {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. 
Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n delete this.registryFactory[backendName];\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n getSortedBackends() {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a, b) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n initializeBackendsAndReturnBest() {\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const { success, asyncInit } = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return { name: backendName, asyncInit };\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n moveData(backend, dataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n tidy(nameOrFn, fn) {\n let name = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n }\n else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error('When calling with two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error('When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result;\n return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n scopedRun(start, end, f) {\n start();\n try {\n const res = f();\n end();\n return res;\n }\n catch (ex) {\n end();\n throw ex;\n }\n }\n nextTensorId() {\n return Engine.nextTensorId++;\n }\n nextVariableId() {\n return Engine.nextVariableId++;\n }\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. 
It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n *\n * This method will go away once all kernels are modularized since we won't\n * need to turn off the tape inside runKernel().\n */\n clone(x) {\n const y = this.makeTensorFromDataId(x.dataId, x.shape, x.dtype);\n const inputs = { x };\n const grad = (dy) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = { x: dy };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast(dy, dtype), gradInputs, null /* grad */, Cast, attrs);\n }\n });\n const saved = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(kernelName, inputs, attrs, inputsToSave, outputsToSave) {\n const forwardFunc = null;\n const backwardsFunc = null;\n // Call runKernel as a stop-gap until we modularize all kernels.\n // Once we modularize all kernels, we will remove the existing\n // `runKernelFunc`.\n return this.runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave);\n }\n shouldCheckForMemLeaks() {\n return this.ENV.getBool('IS_TEST');\n }\n checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) {\n const numDataIdsAfter = this.backend.numDataIds();\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(`Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n /**\n * @deprecated Use `runKernel` for newly added kernels. Keep using this method\n * only for kernels that are not yet fully modularized.\n */\n runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave) {\n let outputs;\n let saved = [];\n const isTapeOn = this.isTapeOn();\n if (kernelName == null) {\n kernelName =\n this.state.activeScope != null ? 
this.state.activeScope.name : '';\n }\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n let kernelFunc;\n const kernel = getKernel(kernelName, this.backendName);\n let out;\n if (kernel != null) {\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({ inputs, attrs, backend: this.backend });\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n const outTensors = outInfos.map(({ dataId, shape, dtype }) => this.makeTensorFromDataId(dataId, shape, dtype));\n // Save the inputs and outputs.\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (isTapeOn) {\n let tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors);\n if (tensorsToSave == null) {\n // Fallback for ops that call runKernelFunc and pass in\n // inputsToSave and outputsToSave. Currently this is the set of ops\n // with kernel support in the WASM backend. Once those ops and\n // respective gradients are modularised we can remove this path.\n if (outputsToSave == null) {\n outputsToSave = [];\n }\n const outsToSave = outTensors.filter((_, i) => outputsToSave[i]);\n tensorsToSave = (inputsToSave || []).slice().concat(outsToSave);\n }\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n }\n else {\n const saveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]);\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n // Stop recording to a tape when running a kernel.\n let kernelProfile;\n this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n }\n else {\n kernelProfile = this.profiler.profileKernel(kernelName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n if (isTapeOn) {\n this.addTapeNode(kernelName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? 
outputs : outputs[0]);\n }\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n saveTensorsForBackwardMode(tensors) {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * Returns undefined if their is no registered gradient for this kernel in the\n * gradient registry.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n getTensorsForGradient(kernelName, inputs, outputs) {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave = gradConfig.inputsToSave || [];\n const outputsToSave = gradConfig.outputsToSave || [];\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave;\n if (gradConfig.saveAllInputs) {\n util.assert(Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.');\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n }\n else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]);\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // TODO(yassogba) throw exception here once all runkernelFunc calls with\n // inputsToSave/outputsToSave are removed\n return null;\n }\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(values, shape, dtype, backend) {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = values.map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. 
It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(dataId, shape, dtype, backend) {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n return t;\n }\n makeVariable(initialValue, trainable = true, name, dtype) {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n incRef(a, backend) {\n const refCount = this.state.tensorInfo.has(a.dataId) ?\n this.state.tensorInfo.get(a.dataId).refCount :\n 0;\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n if (refCount === 0) {\n this.state.numDataBuffers++;\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes,\n refCount: 0\n });\n this.state.numBytes += bytes;\n }\n this.state.tensorInfo.get(a.dataId).refCount++;\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n disposeTensor(a) {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n const refCount = info.refCount;\n if (refCount <= 1) {\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64') {\n this.state.numBytes -= info.bytes;\n }\n this.state.numDataBuffers--;\n info.backend.disposeData(a.dataId);\n this.state.tensorInfo.delete(a.dataId);\n }\n else {\n this.state.tensorInfo.get(a.dataId).refCount--;\n }\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. 
Creating a stack trace is too expensive\n // to do unconditionally.\n }\n disposeVariables() {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n disposeVariable(v) {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n memory() {\n const info = this.backend.memory();\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push('Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n async profile(query) {\n this.state.profiling = true;\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n this.state.profiling = false;\n this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n isTapeOn() {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) {\n const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved };\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n keep(result) {\n result.kept = true;\n return result;\n }\n startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n endTape() {\n this.state.gradientDepth--;\n }\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name) {\n const scopeInfo = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n /**\n * End a scope. 
Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id));\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(f, xs, dy, allowNoGradients = false) {\n util.assert(xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f));\n util.assert(y instanceof Tensor, () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n return this.tidy('backward', () => {\n const accumulatedGradientMap = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(accumulatedGradientMap, filteredTape, \n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f), \n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return { value: y, grads };\n });\n }\n customGrad(f) {\n util.assert(util.isFunction(f), () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs) => {\n util.assert(inputs.every(t => t instanceof Tensor), () => 'The args passed in customGrad(f)(x1, x2,...) 
must all be ' +\n 'tensors');\n let res;\n const inputMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n return this.runKernelFunc((_, save) => {\n res = f(...[...inputs, save]);\n util.assert(res.value instanceof Tensor, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(util.isFunction(res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n }, inputMap, (dy, saved) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads = Array.isArray(gradRes) ? gradRes : [gradRes];\n util.assert(grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(grads.every(t => t instanceof Tensor), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n });\n };\n }\n readSync(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n async time(query) {\n const start = now();\n const timingInfo = await this.backend.time(query);\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n track(result) {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n return result;\n }\n get registeredVariables() {\n return this.state.registeredVariables;\n }\n /**\n * Resets the engine state. 
Removes all backends but does not remove\n * registered backend factories.\n */\n reset() {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\nEngine.nextTensorId = 0;\nEngine.nextVariableId = 0;\nfunction ones(shape) {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\nexport function getOrMakeEngine() {\n const ns = getGlobalNamespace();\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\nexport const ENGINE = getOrMakeEngine();\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a, b) {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = { a, b };\n return ENGINE.runKernelFunc((backend, save) => {\n const res = backend.add(a, b);\n save([a, b]);\n return res;\n }, inputs, null /* gradient */, Add);\n}\n//# sourceMappingURL=engine.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined() {\n return typeof navigator !== 'undefined' && navigator != null;\n}\nexport function isMobile() {\n if (_isNavigatorDefined()) {\n // tslint:disable-next-line:no-any\n const a = navigator.userAgent || navigator.vendor || window.opera;\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s 
)|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\nexport function isBrowser() {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n//# sourceMappingURL=device_util.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\nimport * as device_util from './device_util';\nimport { env } from './environment';\nconst ENV = env();\n/**\n * This file contains environment-related flag registrations.\n */\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn('Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_NODE', () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n/** Whether this browser is Chrome. 
*/\nENV.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n//# sourceMappingURL=flags.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from './engine';\nimport { env } from './environment';\nimport { Tensor } from './tensor';\nimport { assert, flatten, inferDtype, isTypedArray, toTypedArray } from './util';\nexport function inferShape(val, dtype) {\n let firstElem = val;\n if (isTypedArray(val)) {\n return dtype === 'string' ? 
[] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape = [];\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n return shape;\n}\nfunction deepAssertShapeConsistency(val, shape, indices) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\nfunction assertDtype(expectedDtype, actualDType, argName, functionName) {\n if (expectedDtype == null) {\n return;\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(`Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\nexport function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 'null' : x.constructor.name;\n throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype) :\n flatten(x, [], skipTypedArray);\n return ENGINE.makeTensor(values, inferredShape, inferredDtype);\n}\nexport function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') {\n if (!Array.isArray(arg)) {\n throw new Error(`Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg;\n return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName), parseAsDtype);\n}\n//# sourceMappingURL=tensor_util_env.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nexport const OP_SCOPE_SUFFIX = '__op';\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f) {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(`Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n let opName = keys[0];\n const fn = f[opName];\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n // tslint:disable-next-line:no-any\n const f2 = (...args) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n }\n catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', { value: opName, configurable: true });\n // tslint:disable-next-line:no-any\n return f2;\n}\n//# sourceMappingURL=operation.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Complex } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real, imag) {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n const forward = (backend) => {\n return backend.complex($real, $imag);\n };\n const inputs = { real: $real, imag: $imag };\n return ENGINE.runKernelFunc(forward, inputs, null /* gradient */, Complex);\n}\nexport const complex = op({ complex_ });\n//# sourceMappingURL=complex.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray } from '../util';\n/** This is shared code across all tensor creation methods. */\nexport function makeTensor(values, shape, inferredShape, dtype) {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(`Cannot construct a complex64 tensor directly. 
` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values];\n }\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values, [], true);\n return ENGINE.makeTensor(values, shape, dtype);\n}\n//# sourceMappingURL=tensor_ops_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(values, shape, dtype) {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/* Type definitions for exporting and importing of models. */\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { complex } from '../ops/complex';\nimport { tensor } from '../ops/tensor';\nimport { sizeFromShape } from '../util';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(tensors, group) {\n // TODO(adarob, cais): Support quantization.\n const specs = [];\n const dataPromises = [];\n const names = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec = { name, shape: t.shape, dtype: t.dtype };\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async (resolve) => {\n const vals = await t.bytes();\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n }\n else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n const tensorValues = await Promise.all(dataPromises);\n return { data: concatenateTypedArrays(tensorValues), specs };\n}\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(buffer, specs) {\n // TODO(adarob, cais): Support quantization.\n const out = {};\n let float16Decode;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values;\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n }\n else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n }\n else {\n throw new Error(`Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n }\n else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray);\n }\n else {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n }\n else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n }\n else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n values.push(bytes);\n offset += byteLength;\n }\n }\n else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n }\n else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs) {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n let totalByteLength = 0;\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. `normliazedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. 
If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfy this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs = [];\n xs.forEach((x) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :\n new x.constructor(x));\n if (!(x instanceof Float32Array || x instanceof Int32Array ||\n x instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n return y.buffer;\n}\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str) {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer) {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str) {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers) {\n if (buffers.length === 1) {\n return buffers[0];\n }\n let totalByteLength = 0;\n buffers.forEach((buffer) => {\n totalByteLength += buffer.byteLength;\n });\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path) {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = path.split(SEPARATOR);\n return items[items.length - 1];\n}\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function getModelArtifactsInfoForJSON(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof 
ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n/**\n * Computes mantisa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable() {\n const convertMantissa = (i) => {\n let m = i << 13;\n let e = 0;\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n return m | e;\n };\n const mantisaTable = new Uint32Array(2048);\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n return mantisaTable;\n}\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable() {\n const exponentTable = new Uint32Array(64);\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n return exponentTable;\n}\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 6d offset values.\n */\nfunction computeFloat16OffsetTable() {\n const offsetTable = new Uint32Array(64);\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n return offsetTable;\n}\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder() {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n return (quantizedArray) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n//# sourceMappingURL=io_utils.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport class IORouterRegistry {\n constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n static getInstance() {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url) {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url, loadOptions) {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n static getHandlers(url, handlerType, loadOptions) {\n const validHandlers = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\nexport const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions);\n//# sourceMappingURL=router_registry.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase() {\n const idbFactory = getIndexedDBFactory();\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\nfunction getIndexedDBFactory() {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('Failed to obtain IndexedDB factory because the current environment' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow = typeof window === 'undefined' ? 
self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error('The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\nfunction setUpDatabase(openRequest) {\n const db = openRequest.result;\n db.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' });\n db.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' });\n}\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB {\n constructor(modelPath) {\n this.indexedDB = getIndexedDBFactory();\n if (modelPath == null || !modelPath) {\n throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n async save(modelArtifacts) {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n return this.databaseAction(this.modelPath, modelArtifacts);\n }\n async load() {\n return this.databaseAction(this.modelPath);\n }\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n databaseAction(modelPath, modelArtifacts) {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n }\n else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n }\n else {\n // Put model into object store.\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo });\n let modelTx;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => 
resolve({ modelArtifactsInfo });\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\nBrowserIndexedDB.URL_SCHEME = 'indexeddb://';\nexport const indexedDBRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel'));\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath) {\n return new BrowserIndexedDB(modelPath);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\nexport class BrowserIndexedDBManager {\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n async listModels() {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = store.getAll();\n getAllInfoRequest.onsuccess = () => {\n const out = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n 
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const getInfoRequest = infoStore.get(path);\n let modelTx;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n }\n else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error => reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n//# sourceMappingURL=indexed_db.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts() {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error('purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if 
(key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\nfunction getModelKeys(path) {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage {\n constructor(modelPath) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n if (modelPath == null || !modelPath) {\n throw new Error('For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n try {\n this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(modelArtifacts.weightData));\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify({\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata\n }));\n return { modelArtifactsInfo };\n }\n catch (err) {\n // If saving failed, clean up all items saved so far.\n this.LS.removeItem(this.keys.info);\n this.LS.removeItem(this.keys.topology);\n this.LS.removeItem(this.keys.weightSpecs);\n this.LS.removeItem(this.keys.weightData);\n 
this.LS.removeItem(this.keys.modelMetadata);\n throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load() {\n const info = JSON.parse(this.LS.getItem(this.keys.info));\n if (info == null) {\n throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);\n }\n if (info.modelTopologyType !== 'JSON') {\n throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n const out = {};\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(`In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString);\n out.format = metadata['format'];\n out.generatedBy = metadata['generatedBy'];\n out.convertedBy = metadata['convertedBy'];\n out.userDefinedMetadata = metadata['userDefinedMetadata'];\n }\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(`In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n return out;\n }\n}\nBrowserLocalStorage.URL_SCHEME = 'localstorage://';\nexport const localStorageRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. 
For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath) {\n return new BrowserLocalStorage(modelPath);\n}\nexport class BrowserLocalStorageManager {\n constructor() {\n assert(env().getBool('IS_BROWSER'), () => 'Current environment is not a web browser');\n assert(typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n async listModels() {\n const out = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key));\n }\n }\n return out;\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info));\n this.LS.removeItem(keys.info);\n this.LS.removeItem(keys.topology);\n this.LS.removeItem(keys.weightSpecs);\n this.LS.removeItem(keys.weightData);\n return info;\n }\n}\n//# sourceMappingURL=local_storage.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\nimport { assert } from '../util';\nimport { IORouterRegistry } from './router_registry';\nconst URL_SCHEME_SUFFIX = '://';\nexport class ModelStoreManagerRegistry {\n constructor() {\n this.managers = {};\n }\n static getInstance() {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme, manager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`);\n registry.managers[scheme] = manager;\n }\n static getManager(scheme) {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n static getSchemes() {\n return Object.keys(this.getInstance().managers);\n }\n}\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url) {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(`The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\nasync function cloneModelInternal(sourceURL, destURL, deleteSource = false) {\n assert(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`);\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`);\n assert(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(saveHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(sourceURL).scheme;\n const modelArtifacts = await loadHandler.load();\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n const saveResult = await saveHandler.save(modelArtifacts);\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n return saveResult.modelArtifactsInfo;\n}\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels() {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out = {};\n for (const scheme of schemes) {\n const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n/**\n * Remove a model specified by URL from a reigstered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url) {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(sourceURL, destURL) {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(sourceURL, destURL) {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\nexport { moveModel, copyModel, removeModel, listModels };\n//# sourceMappingURL=model_management.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { BrowserIndexedDB, BrowserIndexedDBManager } from '../io/indexed_db';\nimport { BrowserLocalStorage, BrowserLocalStorageManager } from '../io/local_storage';\nimport { ModelStoreManagerRegistry } from '../io/model_management';\nexport class PlatformBrowser {\n fetch(path, init) {\n return fetch(path, init);\n }\n now() {\n return performance.now();\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n }\n catch (err) {\n }\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n }\n catch (err) {\n }\n}\n//# sourceMappingURL=platform_browser.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\nlet systemFetch;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch() {\n return systemFetch;\n}\nexport class PlatformNode {\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n fetch(path, requestInits) {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n now() {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n//# sourceMappingURL=platform_node.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorBuffer } from '../tensor';\nimport * as util from '../util';\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. 
Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(shape, dtype = 'float32', values) {\n dtype = dtype || 'float32';\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n//# sourceMappingURL=buffer.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Cast } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x, dtype) {\n const $x = convertToTensor(x, 'x', 'cast');\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n const inputs = { x: $x };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast($x, dtype), inputs, null /* grad */, Cast, attrs);\n}\nexport const cast = op({ cast_ });\n//# sourceMappingURL=cast.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Identity } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x) {\n const $x = convertToTensor(x, 'x', 'clone', null);\n const forward = () => ENGINE.makeTensorFromDataId($x.dataId, $x.shape, $x.dtype);\n const inputs = { x: $x };\n // Note this op is called tf.identity in python. Hence the kernel name used\n // here.\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Identity);\n}\nexport const clone = op({ clone_ });\n//# sourceMappingURL=clone.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x, verbose = false) {\n console.log(x.toString(verbose));\n}\n//# sourceMappingURL=print.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Required side effectful code for tfjs-core\n// Set up Engine and ENV\nimport { getOrMakeEngine } from './engine';\ngetOrMakeEngine();\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n// Set up OpHandler\nimport { buffer } from './ops/buffer';\nimport { cast } from './ops/cast';\nimport { clone } from './ops/clone';\nimport { print } from './ops/print';\nimport { setOpHandler } from './tensor';\nconst opHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n//# sourceMappingURL=base_side_effects.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { basename, concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\nfunction defer(f) {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\nexport class BrowserDownloads {\n constructor(fileNamePrefix) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error('browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n async save(modelArtifacts) {\n if (typeof (document) === 'undefined') {\n throw new Error('Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const 
weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const weightsManifest = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n weightsManifest\n };\n const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: 'application/json' }));\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :\n this.jsonAnchor;\n jsonAnchor.download = this.modelTopologyFileName;\n jsonAnchor.href = modelTopologyAndWeightManifestURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) };\n }\n }\n}\nBrowserDownloads.URL_SCHEME = 'downloads://';\nclass BrowserFiles {\n constructor(files) {\n if (files == null || files.length < 1) {\n throw new Error(`When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.files = files;\n }\n async load() {\n const jsonFile = this.files[0];\n const weightFiles = this.files.slice(1);\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse(event.target.result);\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));\n return;\n }\n if (weightFiles.length === 0) {\n resolve({ modelTopology });\n }\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${jsonFile.name}`));\n return;\n }\n let pathToFile;\n try {\n pathToFile =\n this.checkManifestAndWeightFiles(weightsManifest, weightFiles);\n }\n catch (err) {\n reject(err);\n return;\n }\n const weightSpecs = [];\n const paths = [];\n const perFileBuffers = [];\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n paths.push(path);\n perFileBuffers.push(null);\n });\n weightSpecs.push(...weightsGroup.weights);\n });\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const weightData = event.target.result;\n const index = paths.indexOf(path);\n 
perFileBuffers[index] = weightData;\n if (perFileBuffers.indexOf(null) === -1) {\n resolve({\n modelTopology,\n weightSpecs,\n weightData: concatenateArrayBuffers(perFileBuffers),\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy,\n userDefinedMetadata: modelJSON.userDefinedMetadata\n });\n }\n };\n weightFileReader.onerror = error => reject(`Failed to weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(pathToFile[path]);\n });\n });\n };\n jsonReader.onerror = error => reject(`Failed to read model topology and weights manifest JSON ` +\n `from file '${jsonFile.name}'. BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(jsonFile);\n });\n }\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n checkManifestAndWeightFiles(manifest, files) {\n const basenames = [];\n const fileNames = files.map(file => basename(file.name));\n const pathToFile = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(`Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);\n }\n else {\n pathToFile[path] = files[fileNames.indexOf(pathBasename)];\n }\n });\n }\n if (basenames.length !== files.length) {\n throw new Error(`Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${files.length}).`);\n }\n return pathToFile;\n }\n}\nexport const browserDownloadsRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used as model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. `null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. 
A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model') {\n return new BrowserDownloads(fileNamePrefix);\n}\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (``)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, One or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files) {\n return new BrowserFiles(files);\n}\n//# sourceMappingURL=browser_files.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { assert } from '../util';\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolved.\n * @param startFraction Optional fraction start. Default to 0.\n * @param endFraction Optional fraction end. 
Default to 1.\n */\nexport function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n const registerMonitor = (promise) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n function checkPromises(promises) {\n assert(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a none empty array');\n }\n function checkFraction(startFraction, endFraction) {\n assert(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n return Promise.all(promises.map(registerMonitor));\n}\n//# sourceMappingURL=progress.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\nimport * as util from '../util';\nimport { decodeWeights } from './io_utils';\nimport { monitorPromisesProgress } from './progress';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n const fetchFunc = loadOptions.fetchFunc == null ? 
env().platform.fetch :\n loadOptions.fetchFunc;\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }));\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction);\n const bufferPromises = responses.map(response => response.arrayBuffer());\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction);\n return buffers;\n}\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });\n const loadWeights = weightsLoaderFactory(fetchWeights);\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(fetchWeightsFunction) {\n return async (manifest, filePathPrefix = '', weightNames) => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch = {};\n const weightsFound = weightNames != null ? 
weightNames.map(() => false) : [];\n const allManifestWeightNames = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n }\n else {\n enqueueWeightsForFetchingFn();\n }\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(`Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. \\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n const fetchUrls = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n const weightsTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n bufferIndexOffset += numBuffers;\n });\n return weightsTensorMap;\n };\n}\n//# sourceMappingURL=weights_loader.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nimport { loadWeightsAsArrayBuffer } from './weights_loader';\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest {\n constructor(path, loadOptions) {\n this.DEFAULT_METHOD = 'POST';\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n if (loadOptions.fetchFunc != null) {\n assert(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n }\n else {\n this.fetch = env().platform.fetch;\n }\n assert(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n if (Array.isArray(path)) {\n assert(path.length === 2, () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error('requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);\n init.body = new FormData();\n const weightsManifest = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata,\n weightsManifest\n };\n init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');\n if (modelArtifacts.weightData != null) {\n init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');\n }\n const response = await this.fetch(this.path, init);\n if (response.ok) {\n return {\n modelArtifactsInfo: 
getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n }\n else {\n throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load() {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n if (!modelConfigRequest.ok) {\n throw new Error(`Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelConfig;\n try {\n modelConfig = await modelConfigRequest.json();\n }\n catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models have been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your.pb models with the \\'pb2json\\'' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n }\n else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n const modelTopology = modelConfig.modelTopology;\n const weightsManifest = modelConfig.weightsManifest;\n const generatedBy = modelConfig.generatedBy;\n const convertedBy = modelConfig.convertedBy;\n const format = modelConfig.format;\n const userDefinedMetadata = modelConfig.userDefinedMetadata;\n // We do not allow both modelTopology and weightsManifest to be missing.\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` +\n `topology or manifest for weights.`);\n }\n let weightSpecs;\n let weightData;\n if (weightsManifest != null) {\n const results = await this.loadWeights(weightsManifest);\n [weightSpecs, weightData] = results;\n }\n const artifacts = {\n modelTopology,\n weightSpecs,\n weightData,\n userDefinedMetadata,\n generatedBy,\n convertedBy,\n format\n };\n const initializer = modelConfig.modelInitializer;\n if (initializer) {\n artifacts.modelInitializer = initializer;\n }\n return artifacts;\n }\n async loadWeights(weightsManifest) {\n const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n const weightSpecs = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n const fetchURLs = [];\n const urlPromises = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n }\n else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\nHTTPRequest.URL_SCHEME_REGEX = /^https?:\\/\\//;\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url) {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\nexport function isHTTPScheme(url) {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\nexport const httpRouter = (url, loadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`, if one wants to use it in\n // an environment that is not the browser or node they have to setup a\n // global fetch polyfill.\n return null;\n }\n else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n }\n else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n};\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n/**\n * Creates an IOHandler subtype that sends model artifacts to HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the requst,\n * this particular server reconsistutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload)') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending\n * HTTP request to server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path, loadOptions) {\n return new HTTPRequest(path, loadOptions);\n}\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(path, loadOptions) {\n return http(path, loadOptions);\n}\n//# sourceMappingURL=http.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nclass PassthroughLoader {\n constructor(modelArtifacts) {\n this.modelArtifacts = modelArtifacts;\n }\n async load() {\n return this.modelArtifacts;\n }\n}\nclass PassthroughSaver {\n constructor(saveHandler) {\n this.saveHandler = saveHandler;\n }\n async save(modelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {\n if (arguments.length === 1) {\n const isModelArtifacts = modelArtifacts.modelTopology != null ||\n modelArtifacts.weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts);\n }\n else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({ modelTopology: modelArtifacts });\n }\n }\n else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts,\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... 
do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(saveHandler) {\n return new PassthroughSaver(saveHandler);\n}\n//# sourceMappingURL=passthrough.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Importing local_storage and indexed_db is necessary for the routers to be\n// registered.\nimport './indexed_db';\nimport './local_storage';\nimport { browserFiles } from './browser_files';\nimport { browserHTTPRequest, http, isHTTPScheme } from './http';\nimport { concatenateArrayBuffers, decodeWeights, encodeWeights, getModelArtifactsInfoForJSON } from './io_utils';\nimport { fromMemory, withSaveHandler } from './passthrough';\nimport { getLoadHandlers, getSaveHandlers, registerLoadRouter, registerSaveRouter } from './router_registry';\nimport { loadWeights, weightsLoaderFactory } from './weights_loader';\nexport { copyModel, listModels, moveModel, removeModel } from './model_management';\nexport { browserFiles, browserHTTPRequest, concatenateArrayBuffers, decodeWeights, encodeWeights, fromMemory, getLoadHandlers, getModelArtifactsInfoForJSON, getSaveHandlers, http, isHTTPScheme, loadWeights, registerLoadRouter, registerSaveRouter, weightsLoaderFactory, withSaveHandler };\n//# sourceMappingURL=io.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Reshape } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Reshapes a `tf.Tensor` to a given shape.\n *\n * Given an input tensor, returns a new tensor with the same values as the\n * input tensor with shape `shape`.\n *\n * If one component of shape is the special value -1, the size of that\n * dimension is computed so that the total size remains constant. In\n * particular, a shape of [-1] flattens into 1-D. 
At most one component of\n * shape can be -1.\n *\n * If shape is 1-D or higher, then the operation returns a tensor with shape\n * shape filled with the values of tensor. In this case, the number of\n * elements implied by shape must be the same as the number of elements in\n * tensor.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.reshape([2, 2]).print();\n * ```\n *\n * @param x The input tensor to be reshaped.\n * @param shape An array of integers defining the output tensor shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction reshape_(x, shape) {\n const $x = convertToTensor(x, 'x', 'reshape', null);\n const inputs = { x: $x };\n const attrs = { shape };\n const forward = (backend, save) => {\n shape = util.inferFromImplicitShape(shape, $x.size);\n util.assert($x.size === util.sizeFromShape(shape), () => 'new shape and old shape must have the same number of elements.');\n save([$x]);\n return backend.reshape($x, shape);\n };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Reshape, attrs);\n}\nexport const reshape = op({ reshape_ });\n//# sourceMappingURL=reshape.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { BatchMatMul } from '../kernel_names';\nimport { makeTypesMatch } from '../tensor_util';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(a, b, transposeA = false, transposeB = false) {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n util.assert($a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank, () => `Error in matMul: inputs must have the same rank of at least 2, ` +\n `got ranks ${$a.rank} and ${$b.rank}.`);\n const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];\n const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];\n const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];\n const outerShapeB = transposeB ? 
$b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];\n const outerDimsA = $a.shape.slice(0, -2);\n const outerDimsB = $b.shape.slice(0, -2);\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n util.assert(util.arraysEqual(outerDimsA, outerDimsB), () => `Error in matMul: outer dimensions (${outerDimsA}) and (` +\n `${outerDimsB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} must match.`);\n util.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]);\n const a3D = transposeA ? reshape($a, [batchDimA, innerShapeA, outerShapeA]) :\n reshape($a, [batchDimA, outerShapeA, innerShapeA]);\n const b3D = transposeB ? reshape($b, [batchDimB, outerShapeB, innerShapeB]) :\n reshape($b, [batchDimB, innerShapeB, outerShapeB]);\n const forward = (backend, save) => {\n save([a3D, b3D]);\n return backend.batchMatMul(a3D, b3D, transposeA, transposeB);\n };\n const inputs = { a: a3D, b: b3D };\n const attrs = { transposeA, transposeB };\n const res = ENGINE.runKernelFunc(forward, inputs, null /* grad */, BatchMatMul, attrs);\n return reshape(res, outShape);\n}\nexport const matMul = op({ matMul_ });\n//# sourceMappingURL=mat_mul.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { OneHot } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). 
If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(indices, depth, onValue = 1, offValue = 0) {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n const outShape = [...$indices.shape, depth];\n const forward = (backend, save) => {\n save([$indices]);\n return reshape(backend.oneHot(reshape($indices, [$indices.size]), depth, onValue, offValue), outShape);\n };\n const inputs = { indices: $indices };\n const attrs = { depth, onValue, offValue };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, OneHot, attrs);\n}\nexport const oneHot = op({ oneHot_ });\n//# sourceMappingURL=one_hot.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Transpose } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. 
Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x, perm) {\n const $x = convertToTensor(x, 'x', 'transpose');\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n if ($x.rank <= 1) {\n return $x.clone();\n }\n const inputs = { x: $x };\n const attrs = { perm };\n return ENGINE.runKernelFunc(backend => backend.transpose($x, perm), inputs, null /* gradient */, Transpose, attrs);\n}\nexport const transpose = op({ transpose_ });\n//# sourceMappingURL=transpose.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { cast } from './cast';\nimport { matMul } from './mat_mul';\nimport { oneHot } from './one_hot';\nimport { op } from './operation';\nimport { transpose } from './transpose';\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. 
The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(labels, predictions, numClasses) {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions = convertToTensor(predictions, 'predictions', 'confusionMatrix');\n util.assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses);\n const oneHotPredictions = oneHot(cast($predictions, 'int32'), numClasses);\n const oneHotLabelsT = transpose(oneHotLabels);\n return cast(matMul(oneHotLabelsT, oneHotPredictions), 'int32');\n}\nexport const confusionMatrix = op({ confusionMatrix_ });\n//# sourceMappingURL=confusion_matrix.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Exports under the tf.math.* namespace.\n */\nimport { confusionMatrix } from './ops/confusion_matrix';\nexport { confusionMatrix };\n//# sourceMappingURL=math.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { assertNonNull } from '../util';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor3d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor3d(values, shape, dtype) {\n assertNonNull(values);\n if (shape != null && shape.length !== 3) {\n throw new Error('tensor3d() requires shape to have three numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 3 && inferredShape.length !== 1) {\n throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error('tensor3d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor3d.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { FromPixels } from '../kernel_names';\nimport { getKernel } from '../kernel_registry';\nimport { Tensor } from '../tensor';\nimport { convertToTensor } from '../tensor_util_env';\nimport { cast } from './cast';\nimport { op } from './operation';\nimport { tensor3d } from './tensor3d';\nlet fromPixels2DContext;\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(pixels, numChannels = 3) {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n if (pixels.data instanceof Uint8Array) {\n isPixelData = true;\n }\n else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n }\n else if (typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n }\n else if (typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n }\n else if (pixels.getContext != null) {\n isCanvasLike = true;\n }\n else {\n throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${pixels.constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (isVideo &&\n pixels.readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error('The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the